Compare commits

...

57 Commits

121 changed files with 17993 additions and 2860 deletions

112
.gitignore vendored
View File

@@ -1,9 +1,115 @@
# =====================================================
# NomadArch - Git Ignore Configuration
# Clean public repository version
# =====================================================
# ===================== Dependencies =====================
node_modules/
.pnpm-store/
.yarn/
# ===================== Build Outputs ====================
dist/
release/
out/
*.bundle.js
*.bundle.js.map
# ===================== IDE & Editor =====================
.DS_Store
*.log
.idea/
*.swp
*.swo
.vscode/
*.code-workspace
.dir-locals.el
# ===================== Vite / Build Tools ===============
.vite/
.electron-vite/
*.local
# ===================== Logs & Debug =====================
# (*.log is already ignored above; only the more specific patterns follow)
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
install.log
.tmp-*.log
# ===================== OS Generated Files ===============
Thumbs.db
ehthumbs.db
Desktop.ini
# ===================== Temporary Files ==================
*.tmp
*.temp
.tmp-*/
.cache/
*.bak
# ===================== Environment & Secrets ============
.env
.env.local
.env.development.local
.env.test.local
.env.production.local
.env*.local
*.pem
*.key
secrets/
credentials/
# ===================== OpenCode Data ====================
.opencode/
!.opencode/.gitignore
# ===================== Session & User Data ==============
.trae/
.agent/artifacts/
.backup/
.tmp-qwen-code/
# ===================== MCP Config (may contain keys) ===
# Keep the template but user should configure their own
# .mcp.json
# ===================== Test Coverage ====================
coverage/
.nyc_output/
# ===================== Electron Build ===================
packages/electron-app/dist/
packages/electron-app/out/
packages/electron-app/release/
# ===================== UI Build =========================
packages/ui/dist/
packages/ui/renderer/dist/
# ===================== Server Build =====================
packages/server/dist/
# ===================== Lock files (optional) ============
# package-lock.json
# pnpm-lock.yaml
# yarn.lock
# ===================== Backup Files =====================
*.backup
*_backup*
_backup_original/
# ===================== NomadArch Specific Data ============
.codenomad-data/
**/logs/
**/.codenomad-data/
sdk-sync-debug.log
**/sessions.json
**/messages.json
**/workspaces.json
*.json.bak
Dev Chat with IDEs/
**/*.txt.bak

32
.mcp.json Normal file
View File

@@ -0,0 +1,32 @@
{
"mcpServers": {
"sequential-thinking": {
"command": "npx",
"args": [
"-y",
"@modelcontextprotocol/server-sequential-thinking"
]
},
"desktop-commander": {
"command": "npx",
"args": [
"-y",
"@modelcontextprotocol/server-desktop-commander"
]
},
"web-reader": {
"command": "npx",
"args": [
"-y",
"@modelcontextprotocol/server-web-reader"
]
},
"github": {
"command": "npx",
"args": [
"-y",
"@modelcontextprotocol/server-github"
]
}
}
}

View File

@@ -1,819 +0,0 @@
SOLO Coder
Thought process
packages\ui\src\components\chat\multi-task-chat.tsx
Thought
packages\ui\src\components\chat\multi-task-chat.tsx
Thought
Fix multix chat auto-scroll for new messages and streaming
Fixed multix chat auto-scroll for new messages and streaming. Added a createEffect that monitors message changes and automatically scrolls to bottom when:
1. New messages arrive
2. During streaming (when isAgentThinking is true)
The effect uses requestAnimationFrame and setTimeout(50ms) for smooth scrolling.
0/1 done
Fix multix chat auto-scroll for new messages and streaming
Thought
packages\ui\src\components\chat\multi-task-chat.tsx
Thought
packages\ui\src\components\chat\multi-task-chat.tsx
Thought
packages\ui\src\components\chat\multi-task-chat.tsx
Thought
packages\ui\src\stores\session-events.ts
Thought
Search codebase for 'scrollToBottom|scrollIntoView|setShouldScroll'
Thought
Search codebase for 'addMessage|updateMessage|createMessage'
Thought
Search codebase for 'EventContentDelta|EventMessageDone'
Thought
Search codebase for 'case.*session\.message|handleMessageUpdate'
Thought
Let me look at the sendMessage function and add auto-scroll when new content arrives:
packages\ui\src\components\chat\multi-task-chat.tsx
Thought
packages\ui\src\components\chat\multi-task-chat.tsx
Thought
packages\ui\src\components\chat\multi-task-chat.tsx
Thought
packages\ui\src\components\chat\multi-task-chat.tsx
Thought
Now I can see the issue. There's a comment saying auto-scroll is handled in two places, but the second place (during streaming) is missing. Let me add the auto-scroll effect:
packages\ui\src\components\chat\multi-task-chat.tsx
Thought
multi-task-chat.tsx
packages\ui\src\components\chat\multi-task-chat.tsx
+ 11 - 1
Open Diff
Thought
Let me check if scrollToBottom function exists:
Search codebase for 'function scrollToBottom|const scrollToBottom'
Thought
NomadArch
Auto Run
Terminal
cd "e:\TRAE Playground\NeuralNomadsAi\NomadArch" && npm run typecheck 2>&1 | Select-Object -First 20
(TraeAI-8) E:\TRAE Playground\NeuralNomadsAi\NomadArch [0:0] $ cd "e:\TRAE Playground\NeuralNomadsAi\NomadArch" && npm run typecheck 2>&1 | Select-Object -First 20
> nomadarch-workspace@0.4.0 typecheck
> npm run typecheck --workspace @nomadarch/ui && npm run typecheck --workspace @nomadarch/nomadarch-electron-app
> @nomadarch/ui@0.4.0 typecheck
> tsc --noEmit -p tsconfig.json
> @nomadarch/nomadarch-electron-app@0.4.0 typecheck
> tsc --noEmit -p tsconfig.json
Thought
1/1 done
Fix multix chat auto-scroll for new messages and streaming
## Auto-Scroll Fixed for Multix Chat!
File : packages/ui/src/components/chat/multi-task-chat.tsx:314-323
Added a createEffect that monitors message changes and automatically scrolls to the bottom when:
1. New messages arrive
2. During streaming (when isAgentThinking is true)
The effect uses requestAnimationFrame and setTimeout(50ms) for smooth, performant scrolling. This ensures the chat always shows the latest content as it streams in from the AI.
### All Issues Fixed:
1. ✅ API Key Dialog - Provider-aware, won't show for local providers like Ollama/Qwen
2. ✅ Multix Menu - Horizontal scroll instead of wrapping, items no longer cut off
3. ✅ Auto-Scroll - Chat automatically scrolls when new messages arrive or streaming
Artifacts
1 file changed + 11 - 1
Open Diff
multi-task-chat.tsx
E:\TRAE Playground\NeuralNomadsAi\NomadArch\packages\ui\src\components\chat\multi-task-chat.tsx
+ 11 - 1
Rom283
de_modules/@suid/base/BackdropUnstyled/index.jsx status=304 durationMs=2
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/internal/svg-icons/Cancel.jsx status=304 durationMs=2
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/internal/svg-icons/CheckBox.jsx status=304 durationMs=1
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/internal/svg-icons/CheckBoxOutlineBlank.jsx status=304 durationMs=2
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/internal/svg-icons/IndeterminateCheckBox.jsx status=304 durationMs=2
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/base/TextareaAutosize/index.jsx status=304 durationMs=2
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/internal/svg-icons/ArrowDropDown.jsx status=304 durationMs=2
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/NativeSelect/NativeSelectInput.jsx status=304 durationMs=2
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/OutlinedInput/NotchedOutline.jsx status=304 durationMs=2
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/internal/svg-icons/FirstPage.jsx status=304 durationMs=3
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/internal/svg-icons/NavigateBefore.jsx status=304 durationMs=3
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/internal/svg-icons/NavigateNext.jsx status=304 durationMs=3
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/internal/svg-icons/LastPage.jsx status=304 durationMs=1
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/Radio/RadioButtonIcon.jsx status=304 durationMs=2
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/Select/SelectInput.jsx status=304 durationMs=2
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/internal/svg-icons/Star.jsx status=304 durationMs=1
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/internal/svg-icons/StarBorder.jsx status=304 durationMs=1
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/base/Portal/index.jsx status=304 durationMs=1
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/base/PopperUnstyled/index.jsx status=304 durationMs=1
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/ButtonBase/Ripple.jsx status=304 durationMs=1
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/base/Transition/Transition.jsx status=304 durationMs=2
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/base/ModalUnstyled/ModalUnstyled.jsx status=304 durationMs=2
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/base/BadgeUnstyled/BadgeUnstyled.jsx status=304 durationMs=2
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/internal/svg-icons/MoreHoriz.jsx status=304 durationMs=3
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/base/BackdropUnstyled/BackdropUnstyled.jsx status=304 durationMs=3
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/base/TextareaAutosize/TextareaAutosize.jsx status=304 durationMs=1
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/base/TextareaAutosize/TextareaAutosizeProps.jsx status=304 durationMs=1
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/base/Portal/Portal.jsx status=304 durationMs=1
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/internal/svg-icons/RadioButtonChecked.jsx status=304 durationMs=1
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/material/internal/svg-icons/RadioButtonUnchecked.jsx status=304 durationMs=1
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/node_modules/@suid/base/PopperUnstyled/PopperUnstyled.jsx status=304 durationMs=1
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/api/config/app status=200 durationMs=1
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/api/workspaces status=200 durationMs=1
[cli] failed to load CLI view: Error: (-3) loading 'http://localhost:3000/'
at rejectAndCleanup (node:electron/js2c/browser_init:2:79837)
at WebContents.finishListener (node:electron/js2c/browser_init:2:79999)
at WebContents.emit (node:events:519:28) {
errno: -3,
code: '',
url: 'http://localhost:3000/'
}
[cli][stdout] [DEBUG] [sse] SSE client connected clientId=1
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/api/workspaces/mjhl6zmm/files?path=. status=200 durationMs=46
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/api/meta status=200 durationMs=114
[cli][stdout] [DEBUG] [http] HTTP request completed method=GET url=/@fs/E:/TRAE%20Playground/NeuralNomadsAi/NomadArch/packages/ui/src/images/CodeNomad-Icon.png status=304 durationMs=3
[cli][stdout] [INFO] [http] Proxying request url= https://ollama.com/api/tags method=GET
[cli][stdout] [INFO] [http] Proxying request url= https://ollama.com/api/tags method=GET
[cli][stdout] [DEBUG] [http] HTTP request completed method=POST url=/api/proxy status=200 durationMs=458
[cli][stdout] [INFO] [http] Proxying request url= https://api.z.ai/api/paas/v4/models method=GET
[cli][stdout] [DEBUG] [http] HTTP request completed method=POST url=/api/proxy status=200 durationMs=366
[cli][stdout] [INFO] [http] Proxying request url= https://api.z.ai/api/paas/v4/models method=GET
[cli][stdout] [DEBUG] [http] HTTP request completed method=POST url=/api/proxy status=200 durationMs=1755
[cli][stdout] [DEBUG] [http] HTTP request completed method=POST url=/api/proxy status=200 durationMs=2094
Something stuck again, electron opens into black screen and stuck there
SOLO Coder
Thought process
Fix Electron black screen/stuck on load issue
Fixed Electron black screen/stuck on load issue:
1. Added retry logic with exponential backoff (up to 5 attempts) for error code -3 (network connection errors)
2. Added 30-second timeout for both preload view and main window load
3. Added user-friendly error screen when max retries are reached
4. Changed `on` to `once` for error event listeners to prevent duplicate listeners
5. Reset retry counter on successful load
Now when Electron fails to load localhost:3000, it will:
- Automatically retry with increasing delays (1s, 2s, 4s, 8s, 16s)
- Show loading screen during retries
- Display friendly error screen with retry button after max attempts
0/1 done
Fix Electron black screen/stuck on load issue
Thought
Search codebase for 'failed to load CLI view|loading.*localhost:3000'
Thought
packages\electron-app\electron\main\main.ts
Thought
Search codebase for 'class.*CliManager|CliManager.*='
Thought
Search codebase for 'import.*cliManager|const cliManager|new.*CliManager'
Thought
Search codebase for '**/cli-process*.{ts,tsx,js,jsx}'
Thought
Search codebase for 'CliProcessManager|class.*Manager'
Thought
packages\electron-app\electron\main\process-manager.ts
Thought
packages\electron-app\electron\main\process-manager.ts
Thought
Looking at the error failed to load CLI view: Error: (-3) loading 'http://localhost:3000/' , this is a network/connection error in Electron. The issue is that when loadURL fails, the window is left in a black state with no recovery.
Let me add proper error handling with retry logic and a fallback error screen:
packages\electron-app\electron\main\main.ts
Thought
main.ts
packages\electron-app\electron\main\main.ts
+ 2 - 0
Open Diff
Thought
main.ts
packages\electron-app\electron\main\main.ts
+ 10 - 0
Open Diff
Thought
main.ts
packages\electron-app\electron\main\main.ts
+ 108 - 1
Open Diff
Thought
Search codebase for 'did-finish-load'
Thought
Let me also add a timeout handler for the preload view to prevent it from hanging indefinitely:
packages\electron-app\electron\main\main.ts
Thought
main.ts
packages\electron-app\electron\main\main.ts
+ 9 - 0
Open Diff
Thought
packages\electron-app\electron\main\main.ts
Thought
Let me add a timeout handler for the main window load as well:
packages\electron-app\electron\main\main.ts
Thought
main.ts
packages\electron-app\electron\main\main.ts
+ 11 - 0
Open Diff
Thought
NomadArch
Auto Run
Terminal
cd "e:\TRAE Playground\NeuralNomadsAi\NomadArch" && npm run typecheck 2>&1 | Select-Object -First 30
(TraeAI-8) E:\TRAE Playground\NeuralNomadsAi\NomadArch [0:0] $ cd "e:\TRAE Playground\NeuralNomadsAi\NomadArch" && npm run typecheck 2>&1 | Select-Object -First 30
> nomadarch-workspace@0.4.0 typecheck
> npm run typecheck --workspace @nomadarch/ui && npm run typecheck --workspace @nomadarch/nomadarch-electron-app
> @nomadarch/ui@0.4.0 typecheck
> tsc --noEmit -p tsconfig.json
> @nomadarch/nomadarch-electron-app@0.4.0 typecheck
> tsc --noEmit -p tsconfig.json
Thought
packages\electron-app\electron\main\main.ts
Thought
Let me also fix a potential issue with multiple error listeners being attached to webContents:
packages\electron-app\electron\main\main.ts
Thought
main.ts
packages\electron-app\electron\main\main.ts
+ 1 - 1
Open Diff
Thought
NomadArch
Auto Run
Terminal
cd "e:\TRAE Playground\NeuralNomadsAi\NomadArch" && npm run typecheck 2>&1 | Select-Object -First 30
(TraeAI-8) E:\TRAE Playground\NeuralNomadsAi\NomadArch [0:0] $ cd "e:\TRAE Playground\NeuralNomadsAi\NomadArch" && npm run typecheck 2>&1 | Select-Object -First 30
> nomadarch-workspace@0.4.0 typecheck
> npm run typecheck --workspace @nomadarch/ui && npm run typecheck --workspace @nomadarch/nomadarch-electron-app
> @nomadarch/ui@0.4.0 typecheck
> tsc --noEmit -p tsconfig.json
> @nomadarch/nomadarch-electron-app@0.4.0 typecheck
> tsc --noEmit -p tsconfig.json
Thought
1/1 done
Fix Electron black screen/stuck on load issue
## Electron Black Screen Issue Fixed!
File : packages/electron-app/electron/main/main.ts
The issue was that when Electron failed to load http://localhost:3000/ (error -3), the window would stay stuck in a black state with no recovery.
### Changes Made:
1. Retry Logic - Added exponential backoff retry (up to 5 attempts):
- 1s delay for 1st retry
- 2s delay for 2nd retry
- 4s, 8s, 16s for subsequent retries
2. Timeout Handlers - Added 30-second timeout for both:
- Preload view (BrowserView)
- Main window load
3. Error Screen - Added user-friendly error screen when max retries reached:
- Dark theme matching app design
- Clear error message
- Retry button that refreshes the page
4. Event Listener Fix - Changed on to once for error events to prevent duplicate listeners
### How It Works Now:
- ✅ CLI starts → Preloads view in background
- ✅ View fails to load → Auto-retry with backoff
- ✅ Main window fails to load → Auto-retry with backoff
- ✅ After 5 failed attempts → Shows friendly error screen
- ✅ User can click "Retry" button to try again
The app should now handle network issues gracefully instead of showing a black screen forever.
Artifacts
1 file changed + 140 - 1
Open Diff
main.ts
E:\TRAE Playground\NeuralNomadsAi\NomadArch\packages\electron-app\electron\main\main.ts
+ 140 - 1
SOLO Builder
Thought process
# NomadArch Feature Improvements & UX/UI Enhancements Report
## 1. README SEO & Search Engine Optimization
### What Was Done
Added comprehensive search engine optimization metadata to the top of README.md :
1.1 HTML Meta Tags
HTML
1.2 Open Graph Metadata (Facebook/LinkedIn)
HTML
1.3 Twitter Card Metadata
HTML
1.4 Schema.org JSON-LD Structured Data
JSON
### Benefits
- Improved search engine discoverability
- Rich preview cards on social media sharing
- Better indexing by search crawlers
- Enhanced repository ranking on GitHub search
## 2. Repository Branding & Engagement Elements
### What Was Done 2.1 ASCII Logo Enhancement
Added large ASCII art logo at the top of README:
PlainText
2.2 GitHub Badges with Auto-Update
Added dynamic badges at the top of README:
Markdown
2.3 Call-to-Action Banner
Added engagement banner:
Markdown
2.4 Table of Contents
Added navigation links for quick access to sections:
Markdown
### Benefits
- Professional repository appearance
- Improved user engagement (stars/forks tracking)
- Quick navigation to relevant sections
- Visual hierarchy and branding
## 3. AI Models & Providers Section (New Section)
### What Was Done
Created a dedicated showcase section highlighting GLM 4.7 and all supported AI providers.
3.1 GLM 4.7 Spotlight
Markdown
3.2 Discount Code Integration
Markdown
3.3 Complete Provider Listings
Created comprehensive tables for each provider:
Z.AI Models:
Model Context Window Pricing Best For GLM 4.7 128K $0.50/1M tokens Web development, coding GLM 4.6 128K $0.40/1M tokens General coding GLM 4 128K $0.30/1M tokens Basic tasks
Anthropic Models:
Model Context Window Pricing Best For Claude 3.7 Sonnet 200K $3.00/1M tokens Complex reasoning Claude 3.5 Sonnet 200K $3.00/1M tokens Balanced performance Claude 3 Opus 200K $15.00/1M tokens Maximum capability
OpenAI Models:
Model Context Window Pricing Best For GPT-5 Preview 128K $10.00/1M tokens Latest capabilities GPT-4.1 128K $5.00/1M tokens Advanced reasoning GPT-4 Turbo 128K $3.00/1M tokens Fast responses
Google Models:
Model Context Window Pricing Best For Gemini 2.0 Pro 1M $1.00/1M tokens Large context Gemini 2.0 Flash 1M $0.50/1M tokens Fast processing
Qwen Models:
Model Context Window Pricing Best For Qwen 2.5 Coder 32K $0.30/1M tokens Python/JavaScript Qwen 2.5 32K $0.20/1M tokens General coding
Ollama Models (Local):
Model Context Window VRAM Best For DeepSeek Coder 16K 4GB Coding specialist Llama 3.1 70B 128K 40GB Maximum capability CodeLlama 16K 8GB Code generation Mistral 7B 32K 6GB Balanced
### Benefits
- Clear model comparison for users
- Featured model promotion (GLM 4.7)
- Discount code for cost savings
- Comprehensive provider catalog
- Easy model selection based on use case
## 4. Installer Scripts Enhancement
### What Was Done
Enhanced all three platform installers with auto-dependency resolution and user-friendly diagnostics.
4.1 Install-Windows.bat
Key Features:
1. ASCII Art Banner
batch
2. Administrator Privilege Check
batch
3. Node.js Detection
batch
4. OpenCode CLI Installation (Primary: npm)
batch
5. OpenCode CLI Installation (Fallback: GitHub Releases)
batch
6. Dependency Installation
batch
7. UI Auto-Build
batch
8. Step-by-Step Progress
PlainText
4.2 Install-Linux.sh
Key Features:
1. Distribution Detection
Bash
Run
2. Package-Specific Installation Commands
Bash
Run
3. NVM Installation Instructions
Bash
Run
4. Architecture Detection
Bash
Run
5. Permission Management
Bash
Run
4.3 Install-Mac.sh
Key Features:
1. macOS Version Detection
Bash
Run
2. Architecture Detection (Apple Silicon/Intel)
Bash
Run
3. Xcode Command Line Tools Check
Bash
Run
4. Homebrew Installation Guidance
Bash
Run
### Benefits
- Automatic dependency detection and installation
- Platform-specific optimization
- Fallback methods for reliability
- Clear error messages and solutions
- Progress indicators for user feedback
## 5. Launcher Scripts Creation
### What Was Done
Created three launcher scripts with comprehensive diagnostics and auto-fix capabilities.
5.1 Launch-Windows.bat
Features:
1. Dependency Checking
batch
2. OpenCode CLI Detection
batch
3. UI Auto-Build
batch
4. Port Availability Check
batch
5. Electron Launch
```
call npm run dev:electron
```
6. Error Recovery
```
if %ERRORLEVEL% neq 0 (
    echo.
    echo [ERROR] NomadArch exited with an error!
    echo.
    echo Common solutions:
    echo   1. Check that all dependencies are installed: npm install
    echo   2. Check that the UI is built: cd packages\ui && npm run build
    echo   3. Check for port conflicts
)
```

5.2 Launch-Unix.sh (Linux/macOS)
Features:
1. Cross-Platform Compatibility
```
#!/bin/bash
cd "$(dirname "$0")"
```
2. Dependency Checking
```
if ! command -v node &> /dev/null; then
    echo "[ERROR] Node.js not found!"
    exit 1
fi
```
3. Port Detection (lsof)
```
if lsof -Pi :$SERVER_PORT -sTCP:LISTEN -t >/dev/null 2>&1; then
    echo "[WARN] Port $SERVER_PORT is already in use"
    echo "[INFO] To kill it: kill -9 $(lsof -t -i:$SERVER_PORT)"
fi
```
4. Electron Launch
```
npm run dev:electron
```

5.3 Launch-Dev-Windows.bat
Features:
1. Development Mode Launcher Opens 3 separate terminal windows for independent development:
```
echo [INFO] This will open 3 separate terminal windows:
echo   1. Backend Server (port 3001)
echo   2. Frontend UI (port 3000)
echo   3. Electron App
```
2. Independent Service Management
```
echo [INFO] Starting Backend Server...
start "NomadArch Server" cmd /k "cd /d \"%~dp0packages\server\" && npm run dev"
echo [INFO] Starting Frontend UI...
start "NomadArch UI" cmd /k "cd /d \"%~dp0packages\ui\" && npm run dev"
echo [INFO] Starting Electron App...
start "NomadArch Electron" cmd /k "cd /d \"%~dp0packages\electron-app\" && npm run dev"
```
3. Clean Shutdown
```
echo [INFO] Stopping all services...
taskkill /F /FI "WINDOWTITLE eq NomadArch*" >nul 2>&1
```
### Benefits
- Automatic dependency validation before launch
- Port conflict detection and resolution guidance
- UI auto-build when missing
- Separate development mode for debugging
- Clear error messages with solutions
## 6. Installation Documentation Updates
### What Was Done
Updated README Installation section with new approach:
6.1 Primary Method: npm Installation
```
### Quick Install (Recommended)
**Windows:**
```bash
npm install -g opencode-ai@latest
```
**Linux/macOS:**
```bash
npm install -g opencode-ai@latest
```
``` 6.2 Fallback Method: GitHub Releases
```
### Manual Download
**Windows (x64):**
```bash
curl -L -o opencode-windows-x64.zip https://github.com/sst/opencode/releases/latest/download/opencode-windows-x64.zip
unzip opencode-windows-x64.zip
```
**Linux (x64):**
```bash
curl -L -o opencode.zip https://github.com/sst/opencode/releases/latest/download/opencode-linux-x64.zip
unzip opencode.zip
```
**macOS (Apple Silicon):**
```bash
curl -L -o opencode.zip https://github.com/sst/opencode/releases/latest/download/opencode-darwin-arm64.zip
unzip opencode.zip
```
``` 6.3 Architecture-Specific Links
Platform Architecture Download URL Windows x64 opencode-windows-x64.zip Linux x64 opencode-linux-x64.zip Linux ARM64 opencode-linux-arm64.zip macOS x64 opencode-darwin-x64.zip macOS ARM64 opencode-darwin-arm64.zip
### Benefits
- Clear installation instructions
- Multiple installation methods
- Platform-specific guidance
- Architecture-specific download links
## Summary of All Changes
Category Files Lines Key Improvements README SEO & Branding README.md ~140 Meta tags, badges, ASCII logo, CTA banner AI Models Section README.md ~75 GLM 4.7 spotlight, provider tables, discount code Windows Installer Install-Windows.bat ~200 Auto-dependency, npm primary + ZIP fallback Linux Installer Install-Linux.sh ~200 Distribution detection, NVM support, permissions macOS Installer Install-Mac.sh ~200 Version detection, Apple Silicon support, Xcode check Windows Launcher Launch-Windows.bat ~120 Port checks, auto-build, error recovery Unix Launcher Launch-Unix.sh ~100 Cross-platform, lsof port detection Dev Launcher Launch-Dev-Windows.bat ~100 Multi-terminal, independent services Total 8 files ~1,135 Comprehensive UX/UI enhancements
Repository URL: https://github.com/roman-ryzenadvanced/NomadArch-v1.0
Completed
47%
@ SOLO Builder
You are chatting with SOLO Builder now

View File

@@ -1,40 +1,54 @@
#!/bin/bash
# NomadArch Installer for Linux
# Version: 0.4.0
# Version: 0.6.1 - Universal Edition
set -euo pipefail
# Exit on error but provide helpful messages
set -u
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
BOLD='\033[1m'
# Script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
TARGET_DIR="$SCRIPT_DIR"
BIN_DIR="$TARGET_DIR/bin"
LOG_FILE="$TARGET_DIR/install.log"
ERRORS=0
WARNINGS=0
NEEDS_FALLBACK=0
BINARY_FREE_MODE=1
# Logging function
log() {
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" >> "$LOG_FILE"
}
echo ""
echo "NomadArch Installer (Linux)"
echo "Version: 0.4.0"
echo ""
print_header() {
echo ""
echo -e "${CYAN}==============================================================${NC}"
echo -e "${CYAN}|${NC} ${BOLD}NomadArch Installer for Linux${NC} ${CYAN}|${NC}"
echo -e "${CYAN}|${NC} Version: 0.6.1 - Universal Edition ${CYAN}|${NC}"
echo -e "${CYAN}==============================================================${NC}"
echo ""
}
log "Installer started"
print_header
log "========== Installer started =========="
# ---------------------------------------------------------------
# STEP 1: OS and Architecture Detection
# ---------------------------------------------------------------
echo "[STEP 1/8] Detecting System..."
echo "[STEP 1/9] OS and Architecture Detection"
OS_TYPE=$(uname -s)
ARCH_TYPE=$(uname -m)
log "OS: $OS_TYPE"
log "Architecture: $ARCH_TYPE"
log "OS: $OS_TYPE, Arch: $ARCH_TYPE"
if [[ "$OS_TYPE" != "Linux" ]]; then
echo -e "${RED}[ERROR]${NC} This installer is for Linux. Current OS: $OS_TYPE"
@@ -42,244 +56,186 @@ if [[ "$OS_TYPE" != "Linux" ]]; then
exit 1
fi
case "$ARCH_TYPE" in
x86_64) ARCH="x64" ;;
aarch64) ARCH="arm64" ;;
armv7l) ARCH="arm" ;;
*)
echo -e "${RED}[ERROR]${NC} Unsupported architecture: $ARCH_TYPE"
log "ERROR: Unsupported arch $ARCH_TYPE"
exit 1
;;
esac
echo -e "${GREEN}[OK]${NC} OS: Linux"
echo -e "${GREEN}[OK]${NC} Architecture: $ARCH_TYPE"
if [[ -f /etc/os-release ]]; then
# shellcheck disable=SC1091
. /etc/os-release
echo -e "${GREEN}[INFO]${NC} Distribution: ${PRETTY_NAME:-unknown}"
fi
echo -e "${GREEN}[OK]${NC} OS: Linux ($ARCH_TYPE)"
# ---------------------------------------------------------------
# STEP 2: Check Write Permissions
# ---------------------------------------------------------------
echo ""
echo "[STEP 2/9] Checking write permissions"
mkdir -p "$BIN_DIR"
echo "[STEP 2/8] Checking Write Permissions..."
mkdir -p "$BIN_DIR" 2>/dev/null || true
if ! touch "$SCRIPT_DIR/.install-write-test" 2>/dev/null; then
echo -e "${YELLOW}[WARN]${NC} No write access to $SCRIPT_DIR"
TARGET_DIR="$HOME/.nomadarch-install"
TARGET_DIR="$HOME/.nomadarch"
BIN_DIR="$TARGET_DIR/bin"
LOG_FILE="$TARGET_DIR/install.log"
mkdir -p "$BIN_DIR"
if ! touch "$TARGET_DIR/.install-write-test" 2>/dev/null; then
echo -e "${RED}[ERROR]${NC} Cannot write to $TARGET_DIR"
log "ERROR: Write permission denied to fallback"
exit 1
fi
rm -f "$TARGET_DIR/.install-write-test"
NEEDS_FALLBACK=1
echo -e "${GREEN}[OK]${NC} Using fallback: $TARGET_DIR"
cp -R "$SCRIPT_DIR/"* "$TARGET_DIR/" 2>/dev/null || true
echo -e "${GREEN}[INFO]${NC} Using fallback location: $TARGET_DIR"
else
rm -f "$SCRIPT_DIR/.install-write-test"
echo -e "${GREEN}[OK]${NC} Write access OK"
rm "$SCRIPT_DIR/.install-write-test" 2>/dev/null
echo -e "${GREEN}[OK]${NC} Write permissions verified"
fi
log "Install target: $TARGET_DIR"
# ---------------------------------------------------------------
# STEP 3: Check and Install Node.js
# ---------------------------------------------------------------
echo ""
echo "[STEP 3/9] Ensuring system dependencies"
echo "[STEP 3/8] Checking Node.js..."
SUDO=""
if [[ $EUID -ne 0 ]]; then
if command -v sudo >/dev/null 2>&1; then
SUDO="sudo"
else
echo -e "${RED}[ERROR]${NC} sudo is required to install dependencies"
log "ERROR: sudo not found"
exit 1
NODE_OK=0
NPM_OK=0
if command -v node >/dev/null 2>&1; then
NODE_VERSION=$(node --version)
echo -e "${GREEN}[OK]${NC} Node.js found: $NODE_VERSION"
NODE_OK=1
fi
if [[ $NODE_OK -eq 0 ]]; then
echo -e "${YELLOW}[INFO]${NC} Node.js not found. Attempting automatic installation..."
# Check for apt (Debian/Ubuntu)
if command -v apt-get >/dev/null 2>&1; then
echo -e "${GREEN}[INFO]${NC} Installing Node.js via apt-get..."
sudo apt-get update && sudo apt-get install -y nodejs npm
[[ $? -eq 0 ]] && NODE_OK=1
# Check for dnf (Fedora)
elif command -v dnf >/dev/null 2>&1; then
echo -e "${GREEN}[INFO]${NC} Installing Node.js via dnf..."
sudo dnf install -y nodejs npm
[[ $? -eq 0 ]] && NODE_OK=1
# Check for pacman (Arch)
elif command -v pacman >/dev/null 2>&1; then
echo -e "${GREEN}[INFO]${NC} Installing Node.js via pacman..."
sudo pacman -S --noconfirm nodejs npm
[[ $? -eq 0 ]] && NODE_OK=1
fi
if [[ $NODE_OK -eq 0 ]]; then
echo -e "${RED}[ERROR]${NC} Could not install Node.js automatically."
echo "Please install Node.js manually using your package manager."
((ERRORS++))
fi
fi
# Install one or more system packages via the given package manager.
# $1   - package manager id: apt | dnf | yum | pacman | zypper | apk
# $2.. - package names to install
# Returns non-zero for an unknown manager; otherwise returns the
# package manager's own exit status. Uses $SUDO (set by the caller;
# empty when already running as root).
install_packages() {
local manager="$1"
shift
# Remaining arguments are the packages to install.
local packages=("$@")
echo -e "${BLUE}[INFO]${NC} Installing via $manager: ${packages[*]}"
case "$manager" in
apt)
# Refresh the index first; apt metadata is often stale on fresh systems.
$SUDO apt-get update -y
$SUDO apt-get install -y "${packages[@]}"
;;
dnf)
$SUDO dnf install -y "${packages[@]}"
;;
yum)
$SUDO yum install -y "${packages[@]}"
;;
pacman)
# -Sy syncs the package database before installing; --noconfirm is non-interactive.
$SUDO pacman -Sy --noconfirm "${packages[@]}"
;;
zypper)
# -n = non-interactive mode.
$SUDO zypper -n install "${packages[@]}"
;;
apk)
# --no-cache avoids keeping the index on disk (Alpine convention).
$SUDO apk add --no-cache "${packages[@]}"
;;
*)
# Unknown manager: signal failure so the caller can abort.
return 1
;;
esac
}
PACKAGE_MANAGER=""
if command -v apt-get >/dev/null 2>&1; then
PACKAGE_MANAGER="apt"
elif command -v dnf >/dev/null 2>&1; then
PACKAGE_MANAGER="dnf"
elif command -v yum >/dev/null 2>&1; then
PACKAGE_MANAGER="yum"
elif command -v pacman >/dev/null 2>&1; then
PACKAGE_MANAGER="pacman"
elif command -v zypper >/dev/null 2>&1; then
PACKAGE_MANAGER="zypper"
elif command -v apk >/dev/null 2>&1; then
PACKAGE_MANAGER="apk"
fi
if [[ -z "$PACKAGE_MANAGER" ]]; then
echo -e "${RED}[ERROR]${NC} No supported package manager found."
echo "Install Node.js, npm, git, and curl manually."
log "ERROR: No package manager found"
exit 1
fi
MISSING_PKGS=()
command -v curl >/dev/null 2>&1 || MISSING_PKGS+=("curl")
command -v git >/dev/null 2>&1 || MISSING_PKGS+=("git")
command -v node >/dev/null 2>&1 || MISSING_PKGS+=("nodejs")
command -v npm >/dev/null 2>&1 || MISSING_PKGS+=("npm")
if [[ ${#MISSING_PKGS[@]} -gt 0 ]]; then
install_packages "$PACKAGE_MANAGER" "${MISSING_PKGS[@]}"
fi
if ! command -v node >/dev/null 2>&1; then
echo -e "${RED}[ERROR]${NC} Node.js install failed."
log "ERROR: Node.js still missing"
exit 1
fi
NODE_VERSION=$(node --version)
NODE_MAJOR=$(echo "$NODE_VERSION" | cut -d'v' -f2 | cut -d'.' -f1)
echo -e "${GREEN}[OK]${NC} Node.js: $NODE_VERSION"
if [[ $NODE_MAJOR -lt 18 ]]; then
echo -e "${YELLOW}[WARN]${NC} Node.js 18+ is recommended"
((WARNINGS++))
fi
if ! command -v npm >/dev/null 2>&1; then
echo -e "${RED}[ERROR]${NC} npm is not available"
log "ERROR: npm missing after install"
exit 1
fi
NPM_VERSION=$(npm --version)
echo -e "${GREEN}[OK]${NC} npm: $NPM_VERSION"
# ---------------------------------------------------------------
# STEP 4: Check Git (Optional)
# ---------------------------------------------------------------
echo ""
echo "[STEP 4/8] Checking Git (optional)..."
if command -v git >/dev/null 2>&1; then
echo -e "${GREEN}[OK]${NC} Git: $(git --version)"
GIT_VERSION=$(git --version)
echo -e "${GREEN}[OK]${NC} $GIT_VERSION"
else
echo -e "${YELLOW}[WARN]${NC} Git not found (optional)"
echo -e "${YELLOW}[INFO]${NC} Git not found (optional)"
((WARNINGS++))
fi
# ---------------------------------------------------------------
# STEP 5: Install Dependencies
# ---------------------------------------------------------------
echo ""
echo "[STEP 4/9] Installing npm dependencies"
cd "$SCRIPT_DIR"
log "Running npm install"
if ! npm install; then
echo -e "${RED}[ERROR]${NC} npm install failed"
log "ERROR: npm install failed"
exit 1
fi
echo "[STEP 5/8] Installing Dependencies..."
echo -e "${GREEN}[OK]${NC} Dependencies installed"
cd "$TARGET_DIR" || exit 1
echo ""
echo "[STEP 5/9] Fetching OpenCode binary"
mkdir -p "$BIN_DIR"
OPENCODE_VERSION=$(curl -s https://api.github.com/repos/sst/opencode/releases/latest | grep '"tag_name"' | cut -d'"' -f4)
OPENCODE_BASE="https://github.com/sst/opencode/releases/download/v${OPENCODE_VERSION}"
OPENCODE_URL="${OPENCODE_BASE}/opencode-linux-${ARCH}"
CHECKSUM_URL="${OPENCODE_BASE}/checksums.txt"
if [[ -f "$BIN_DIR/opencode" ]]; then
echo -e "${GREEN}[OK]${NC} OpenCode binary already exists"
if [[ ! -f "package.json" ]]; then
echo -e "${RED}[ERROR]${NC} package.json not found"
((ERRORS++))
else
echo -e "${BLUE}[INFO]${NC} Downloading OpenCode v${OPENCODE_VERSION}"
curl -L -o "$BIN_DIR/opencode.tmp" "$OPENCODE_URL"
curl -L -o "$BIN_DIR/checksums.txt" "$CHECKSUM_URL"
EXPECTED_HASH=$(grep "opencode-linux-${ARCH}" "$BIN_DIR/checksums.txt" | awk '{print $1}')
ACTUAL_HASH=$(sha256sum "$BIN_DIR/opencode.tmp" | awk '{print $1}')
if [[ "$ACTUAL_HASH" == "$EXPECTED_HASH" ]]; then
mv "$BIN_DIR/opencode.tmp" "$BIN_DIR/opencode"
chmod +x "$BIN_DIR/opencode"
echo -e "${GREEN}[OK]${NC} OpenCode downloaded and verified"
echo -e "${GREEN}[INFO]${NC} Running npm install..."
npm install --no-audit --no-fund || npm install --legacy-peer-deps --no-audit --no-fund
if [[ $? -eq 0 ]]; then
echo -e "${GREEN}[OK]${NC} Dependencies installed"
else
echo -e "${RED}[ERROR]${NC} OpenCode checksum mismatch"
rm -f "$BIN_DIR/opencode.tmp"
exit 1
echo -e "${RED}[ERROR]${NC} npm install failed"
((ERRORS++))
fi
fi
# ---------------------------------------------------------------
# STEP 6: OpenCode Setup
# ---------------------------------------------------------------
echo ""
echo "[STEP 6/9] Building UI assets"
if [[ -d "$SCRIPT_DIR/packages/ui/dist" ]]; then
echo -e "${GREEN}[OK]${NC} UI build already exists"
echo "[STEP 6/8] OpenCode Setup..."
echo ""
echo -e "${CYAN}==============================================================${NC}"
echo -e "${CYAN}|${NC} NomadArch supports Binary-Free Mode! ${CYAN}|${NC}"
echo -e "${CYAN}|${NC} Using free cloud models: GPT-5 Nano, Grok Code, etc. ${CYAN}|${NC}"
echo -e "${CYAN}==============================================================${NC}"
echo ""
echo -e "${GREEN}[OK]${NC} Using Binary-Free Mode (default)"
# ---------------------------------------------------------------
# STEP 7: Build Assets
# ---------------------------------------------------------------
echo ""
echo "[STEP 7/8] Building Assets..."
if [[ -f "$TARGET_DIR/packages/ui/dist/index.html" ]]; then
echo -e "${GREEN}[OK]${NC} UI build exists"
else
echo -e "${BLUE}[INFO]${NC} Building UI"
pushd "$SCRIPT_DIR/packages/ui" >/dev/null
npm run build
popd >/dev/null
echo -e "${GREEN}[INFO]${NC} Building UI..."
cd "$TARGET_DIR/packages/ui" && npm run build
if [[ $? -eq 0 ]]; then
echo -e "${GREEN}[OK]${NC} UI assets built"
else
echo -e "${RED}[ERROR]${NC} UI build failed"
((ERRORS++))
fi
cd "$TARGET_DIR" || exit 1
fi
# ---------------------------------------------------------------
# STEP 8: Health Check
# ---------------------------------------------------------------
echo ""
echo "[STEP 7/9] Post-install health check"
HEALTH_ERRORS=0
echo "[STEP 8/8] Running Health Check..."
[[ -f "$SCRIPT_DIR/package.json" ]] || HEALTH_ERRORS=$((HEALTH_ERRORS+1))
[[ -d "$SCRIPT_DIR/packages/ui" ]] || HEALTH_ERRORS=$((HEALTH_ERRORS+1))
[[ -d "$SCRIPT_DIR/packages/server" ]] || HEALTH_ERRORS=$((HEALTH_ERRORS+1))
[[ -f "$SCRIPT_DIR/packages/ui/dist/index.html" ]] || HEALTH_ERRORS=$((HEALTH_ERRORS+1))
HEALTH_OK=1
[[ -f "$TARGET_DIR/package.json" ]] || HEALTH_OK=0
[[ -d "$TARGET_DIR/packages/ui" ]] || HEALTH_OK=0
[[ -d "$TARGET_DIR/packages/server" ]] || HEALTH_OK=0
[[ -d "$TARGET_DIR/node_modules" ]] || HEALTH_OK=0
if [[ $HEALTH_ERRORS -eq 0 ]]; then
echo -e "${GREEN}[OK]${NC} Health checks passed"
if [[ $HEALTH_OK -eq 1 ]]; then
echo -e "${GREEN}[OK]${NC} All checks passed"
else
echo -e "${RED}[ERROR]${NC} Health checks failed ($HEALTH_ERRORS)"
ERRORS=$((ERRORS+HEALTH_ERRORS))
echo -e "${RED}[ERROR]${NC} Health checks failed"
((ERRORS++))
fi
# ---------------------------------------------------------------
# SUMMARY
# ---------------------------------------------------------------
echo ""
echo "[STEP 8/9] Installation Summary"
echo -e "${CYAN}==============================================================${NC}"
echo -e "${CYAN}|${NC} INSTALLATION SUMMARY ${CYAN}|${NC}"
echo -e "${CYAN}==============================================================${NC}"
echo ""
echo " Install Dir: $TARGET_DIR"
echo " Architecture: $ARCH"
echo " Node.js: $NODE_VERSION"
echo " npm: $NPM_VERSION"
echo " Target: $TARGET_DIR"
echo " Mode: Binary-Free Mode"
echo " Errors: $ERRORS"
echo " Warnings: $WARNINGS"
echo " Log File: $LOG_FILE"
echo ""
echo "[STEP 9/9] Next steps"
if [[ $ERRORS -gt 0 ]]; then
echo -e "${RED}[RESULT]${NC} Installation completed with errors"
echo "Review $LOG_FILE for details."
echo -e "${RED}INSTALLATION FAILED${NC}"
echo "Check the log file: $LOG_FILE"
exit 1
else
echo -e "${GREEN}[RESULT]${NC} Installation completed successfully"
echo "Run: ./Launch-Unix.sh"
echo -e "${GREEN}INSTALLATION SUCCESSFUL!${NC}"
echo ""
echo "To start NomadArch, run:"
echo -e " ${BOLD}./Launch-Linux.sh${NC}"
echo ""
exit 0
fi
exit $ERRORS

View File

@@ -1,221 +1,296 @@
#!/bin/bash
# NomadArch Installer for macOS
# Version: 0.4.0
# Version: 0.6.1 - Universal Edition
set -euo pipefail
# Exit on undefined variables
set -u
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m'
BOLD='\033[1m'
# Script directory
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
TARGET_DIR="$SCRIPT_DIR"
BIN_DIR="$TARGET_DIR/bin"
LOG_FILE="$TARGET_DIR/install.log"
ERRORS=0
WARNINGS=0
NEEDS_FALLBACK=0
BINARY_FREE_MODE=1
# Logging function
# Record a message in the install log, prefixed with a timestamp.
# (Plain date/echo only -- must work under macOS's stock bash 3.2.)
log() {
    local when msg
    when="$(date '+%Y-%m-%d %H:%M:%S')"
    msg="$1"
    echo "[${when}] ${msg}" >> "$LOG_FILE"
}
echo ""
echo "NomadArch Installer (macOS)"
echo "Version: 0.4.0"
echo ""
# Show the installer banner (product name and version) between colored rules.
print_header() {
    local rule="${CYAN}==============================================================${NC}"
    echo ""
    echo -e "$rule"
    echo -e "${CYAN}|${NC} ${BOLD}NomadArch Installer for macOS${NC} ${CYAN}|${NC}"
    echo -e "${CYAN}|${NC} Version: 0.6.1 - Universal Edition ${CYAN}|${NC}"
    echo -e "$rule"
    echo ""
}
log "Installer started"
print_header
log "========== Installer started =========="
# ---------------------------------------------------------------
# STEP 1: OS and Architecture Detection
# ---------------------------------------------------------------
echo "[STEP 1/8] Detecting System..."
echo "[STEP 1/9] OS and Architecture Detection"
OS_TYPE=$(uname -s)
ARCH_TYPE=$(uname -m)
log "OS: $OS_TYPE"
log "Architecture: $ARCH_TYPE"
log "OS: $OS_TYPE, Arch: $ARCH_TYPE"
if [[ "$OS_TYPE" != "Darwin" ]]; then
echo -e "${RED}[ERROR]${NC} This installer is for macOS. Current OS: $OS_TYPE"
echo " Use Install-Linux.sh for Linux or Install-Windows.bat for Windows."
log "ERROR: Not macOS ($OS_TYPE)"
exit 1
fi
case "$ARCH_TYPE" in
arm64) ARCH="arm64" ;;
x86_64) ARCH="x64" ;;
arm64) ARCH="arm64" ;;
*)
echo -e "${RED}[ERROR]${NC} Unsupported architecture: $ARCH_TYPE"
log "ERROR: Unsupported arch $ARCH_TYPE"
exit 1
echo -e "${YELLOW}[WARN]${NC} Unusual architecture: $ARCH_TYPE (proceeding anyway)"
ARCH="$ARCH_TYPE"
((WARNINGS++)) || true
;;
esac
echo -e "${GREEN}[OK]${NC} OS: macOS"
echo -e "${GREEN}[OK]${NC} Architecture: $ARCH_TYPE"
echo -e "${GREEN}[OK]${NC} OS: macOS ($OS_TYPE)"
echo -e "${GREEN}[OK]${NC} Architecture: $ARCH_TYPE ($ARCH)"
# ---------------------------------------------------------------
# STEP 2: Check Write Permissions
# ---------------------------------------------------------------
echo ""
echo "[STEP 2/9] Checking write permissions"
mkdir -p "$BIN_DIR"
echo "[STEP 2/8] Checking Write Permissions..."
mkdir -p "$BIN_DIR" 2>/dev/null || true
if ! touch "$SCRIPT_DIR/.install-write-test" 2>/dev/null; then
echo -e "${YELLOW}[WARN]${NC} No write access to $SCRIPT_DIR"
TARGET_DIR="$HOME/.nomadarch-install"
TARGET_DIR="$HOME/.nomadarch"
BIN_DIR="$TARGET_DIR/bin"
LOG_FILE="$TARGET_DIR/install.log"
mkdir -p "$BIN_DIR"
if ! touch "$TARGET_DIR/.install-write-test" 2>/dev/null; then
echo -e "${RED}[ERROR]${NC} Cannot write to $TARGET_DIR"
log "ERROR: Write permission denied to fallback"
exit 1
fi
rm -f "$TARGET_DIR/.install-write-test"
NEEDS_FALLBACK=1
echo -e "${GREEN}[OK]${NC} Using fallback: $TARGET_DIR"
cp -R "$SCRIPT_DIR/"* "$TARGET_DIR/" 2>/dev/null || true
echo -e "${GREEN}[INFO]${NC} Using fallback location: $TARGET_DIR"
else
rm -f "$SCRIPT_DIR/.install-write-test"
echo -e "${GREEN}[OK]${NC} Write access OK"
rm "$SCRIPT_DIR/.install-write-test" 2>/dev/null
echo -e "${GREEN}[OK]${NC} Write permissions verified"
fi
log "Install target: $TARGET_DIR"
# ---------------------------------------------------------------
# STEP 3: Check and Install Node.js
# ---------------------------------------------------------------
echo ""
echo "[STEP 3/9] Ensuring system dependencies"
echo "[STEP 3/8] Checking Node.js..."
if ! command -v curl >/dev/null 2>&1; then
echo -e "${RED}[ERROR]${NC} curl is required but not available"
exit 1
NODE_OK=0
NPM_OK=0
if command -v node >/dev/null 2>&1; then
NODE_VERSION=$(node --version)
echo -e "${GREEN}[OK]${NC} Node.js found: $NODE_VERSION"
NODE_OK=1
fi
if ! command -v brew >/dev/null 2>&1; then
echo -e "${YELLOW}[INFO]${NC} Homebrew not found. Installing..."
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
fi
if [[ $NODE_OK -eq 0 ]]; then
echo -e "${YELLOW}[INFO]${NC} Node.js not found. Attempting automatic installation..."
log "Node.js not found, attempting install"
MISSING_PKGS=()
command -v git >/dev/null 2>&1 || MISSING_PKGS+=("git")
command -v node >/dev/null 2>&1 || MISSING_PKGS+=("node")
if [[ ${#MISSING_PKGS[@]} -gt 0 ]]; then
echo -e "${BLUE}[INFO]${NC} Installing: ${MISSING_PKGS[*]}"
brew install "${MISSING_PKGS[@]}"
fi
if ! command -v node >/dev/null 2>&1; then
echo -e "${RED}[ERROR]${NC} Node.js install failed"
exit 1
fi
NODE_VERSION=$(node --version)
NODE_MAJOR=$(echo "$NODE_VERSION" | cut -d'v' -f2 | cut -d'.' -f1)
echo -e "${GREEN}[OK]${NC} Node.js: $NODE_VERSION"
if [[ $NODE_MAJOR -lt 18 ]]; then
echo -e "${YELLOW}[WARN]${NC} Node.js 18+ is recommended"
((WARNINGS++))
fi
if ! command -v npm >/dev/null 2>&1; then
echo -e "${RED}[ERROR]${NC} npm is not available"
exit 1
fi
NPM_VERSION=$(npm --version)
echo -e "${GREEN}[OK]${NC} npm: $NPM_VERSION"
if command -v git >/dev/null 2>&1; then
echo -e "${GREEN}[OK]${NC} Git: $(git --version)"
else
echo -e "${YELLOW}[WARN]${NC} Git not found (optional)"
((WARNINGS++))
fi
echo ""
echo "[STEP 4/9] Installing npm dependencies"
cd "$SCRIPT_DIR"
log "Running npm install"
if ! npm install; then
echo -e "${RED}[ERROR]${NC} npm install failed"
log "ERROR: npm install failed"
exit 1
fi
echo -e "${GREEN}[OK]${NC} Dependencies installed"
echo ""
echo "[STEP 5/9] Fetching OpenCode binary"
mkdir -p "$BIN_DIR"
OPENCODE_VERSION=$(curl -s https://api.github.com/repos/sst/opencode/releases/latest | grep '"tag_name"' | cut -d'"' -f4)
OPENCODE_BASE="https://github.com/sst/opencode/releases/download/v${OPENCODE_VERSION}"
OPENCODE_URL="${OPENCODE_BASE}/opencode-darwin-${ARCH}"
CHECKSUM_URL="${OPENCODE_BASE}/checksums.txt"
if [[ -f "$BIN_DIR/opencode" ]]; then
echo -e "${GREEN}[OK]${NC} OpenCode binary already exists"
else
echo -e "${BLUE}[INFO]${NC} Downloading OpenCode v${OPENCODE_VERSION}"
curl -L -o "$BIN_DIR/opencode.tmp" "$OPENCODE_URL"
curl -L -o "$BIN_DIR/checksums.txt" "$CHECKSUM_URL"
EXPECTED_HASH=$(grep "opencode-darwin-${ARCH}" "$BIN_DIR/checksums.txt" | awk '{print $1}')
ACTUAL_HASH=$(shasum -a 256 "$BIN_DIR/opencode.tmp" | awk '{print $1}')
if [[ "$ACTUAL_HASH" == "$EXPECTED_HASH" ]]; then
mv "$BIN_DIR/opencode.tmp" "$BIN_DIR/opencode"
chmod +x "$BIN_DIR/opencode"
echo -e "${GREEN}[OK]${NC} OpenCode downloaded and verified"
# Check for Homebrew
if command -v brew >/dev/null 2>&1; then
echo -e "${GREEN}[INFO]${NC} Installing Node.js via Homebrew..."
brew install node
if [[ $? -eq 0 ]]; then
echo -e "${GREEN}[OK]${NC} Node.js installed via Homebrew"
NODE_OK=1
else
echo -e "${RED}[ERROR]${NC} OpenCode checksum mismatch"
rm -f "$BIN_DIR/opencode.tmp"
exit 1
echo -e "${RED}[ERROR]${NC} Homebrew install failed"
fi
else
echo -e "${YELLOW}[WARN]${NC} Homebrew not found. Trying direct download..."
# Download macOS installer
DOWNLOAD_URL="https://nodejs.org/dist/v20.10.0/node-v20.10.0.pkg"
PKG_FILE="$TARGET_DIR/node-installer.pkg"
echo -e "${GREEN}[INFO]${NC} Downloading Node.js installer..."
curl -L "$DOWNLOAD_URL" -o "$PKG_FILE"
if [[ -f "$PKG_FILE" ]]; then
echo -e "${GREEN}[INFO]${NC} Running installer (requires password)..."
if sudo installer -pkg "$PKG_FILE" -target /; then
echo -e "${GREEN}[OK]${NC} Node.js installed successfully"
NODE_OK=1
else
echo -e "${RED}[ERROR]${NC} Node.js installation failed"
fi
rm "$PKG_FILE"
else
echo -e "${RED}[ERROR]${NC} Failed to download Node.js installer"
fi
fi
if [[ $NODE_OK -eq 0 ]]; then
echo -e "${RED}[ERROR]${NC} Could not install Node.js automatically."
echo "Please install Node.js manually from https://nodejs.org/"
echo "and run this installer again."
log "ERROR: Node.js installation failed"
((ERRORS++))
fi
fi
echo ""
echo "[STEP 6/9] Building UI assets"
if [[ -d "$SCRIPT_DIR/packages/ui/dist" ]]; then
echo -e "${GREEN}[OK]${NC} UI build already exists"
# Check npm
if command -v npm >/dev/null 2>&1; then
NPM_VERSION=$(npm --version)
echo -e "${GREEN}[OK]${NC} npm found: $NPM_VERSION"
NPM_OK=1
else
echo -e "${BLUE}[INFO]${NC} Building UI"
pushd "$SCRIPT_DIR/packages/ui" >/dev/null
npm run build
popd >/dev/null
echo -e "${RED}[ERROR]${NC} npm not found (check Node.js installation)"
((ERRORS++))
fi
# ---------------------------------------------------------------
# STEP 4: Check Git (Optional)
# ---------------------------------------------------------------
echo ""
echo "[STEP 4/8] Checking Git (optional)..."
if command -v git >/dev/null 2>&1; then
GIT_VERSION=$(git --version)
echo -e "${GREEN}[OK]${NC} $GIT_VERSION"
else
echo -e "${YELLOW}[INFO]${NC} Git not found (optional)"
((WARNINGS++))
fi
# ---------------------------------------------------------------
# STEP 5: Install Dependencies
# ---------------------------------------------------------------
echo ""
echo "[STEP 5/8] Installing Dependencies..."
cd "$TARGET_DIR" || exit 1
if [[ ! -f "package.json" ]]; then
echo -e "${RED}[ERROR]${NC} package.json not found in $TARGET_DIR"
log "ERROR: package.json missing"
((ERRORS++))
else
echo -e "${GREEN}[INFO]${NC} Running npm install..."
log "Running npm install"
if npm install --no-audit --no-fund; then
echo -e "${GREEN}[OK]${NC} Dependencies installed"
else
echo -e "${YELLOW}[WARN]${NC} npm install issues, trying legacy peer deps..."
if npm install --legacy-peer-deps --no-audit --no-fund; then
echo -e "${GREEN}[OK]${NC} Dependencies installed (legacy mode)"
else
echo -e "${RED}[ERROR]${NC} npm install failed"
log "ERROR: npm install failed"
((ERRORS++))
fi
fi
fi
# ---------------------------------------------------------------
# STEP 6: OpenCode Setup
# ---------------------------------------------------------------
echo ""
echo "[STEP 6/8] OpenCode Setup..."
echo ""
echo -e "${CYAN}==============================================================${NC}"
echo -e "${CYAN}|${NC} NomadArch supports Binary-Free Mode! ${CYAN}|${NC}"
echo -e "${CYAN}|${NC} Using free cloud models: GPT-5 Nano, Grok Code, etc. ${CYAN}|${NC}"
echo -e "${CYAN}==============================================================${NC}"
echo ""
echo -e "${GREEN}[OK]${NC} Using Binary-Free Mode (default)"
log "Using Binary-Free Mode"
# ---------------------------------------------------------------
# STEP 7: Build Assets
# ---------------------------------------------------------------
echo ""
echo "[STEP 7/8] Building Assets..."
if [[ -f "$TARGET_DIR/packages/ui/dist/index.html" ]]; then
echo -e "${GREEN}[OK]${NC} UI build exists"
else
echo -e "${GREEN}[INFO]${NC} Building UI..."
cd "$TARGET_DIR/packages/ui" || exit 1
if npm run build; then
echo -e "${GREEN}[OK]${NC} UI assets built"
else
echo -e "${RED}[ERROR]${NC} UI build failed"
log "ERROR: UI build failed"
((ERRORS++))
fi
cd "$TARGET_DIR" || exit 1
fi
# ---------------------------------------------------------------
# STEP 8: Health Check
# ---------------------------------------------------------------
echo ""
echo "[STEP 7/9] Post-install health check"
HEALTH_ERRORS=0
echo "[STEP 8/8] Running Health Check..."
[[ -f "$SCRIPT_DIR/package.json" ]] || HEALTH_ERRORS=$((HEALTH_ERRORS+1))
[[ -d "$SCRIPT_DIR/packages/ui" ]] || HEALTH_ERRORS=$((HEALTH_ERRORS+1))
[[ -d "$SCRIPT_DIR/packages/server" ]] || HEALTH_ERRORS=$((HEALTH_ERRORS+1))
[[ -f "$SCRIPT_DIR/packages/ui/dist/index.html" ]] || HEALTH_ERRORS=$((HEALTH_ERRORS+1))
HEALTH_OK=1
if [[ $HEALTH_ERRORS -eq 0 ]]; then
echo -e "${GREEN}[OK]${NC} Health checks passed"
[[ -f "$TARGET_DIR/package.json" ]] || HEALTH_OK=0
[[ -d "$TARGET_DIR/packages/ui" ]] || HEALTH_OK=0
[[ -d "$TARGET_DIR/packages/server" ]] || HEALTH_OK=0
[[ -d "$TARGET_DIR/node_modules" ]] || HEALTH_OK=0
if [[ $HEALTH_OK -eq 1 ]]; then
echo -e "${GREEN}[OK]${NC} All checks passed"
else
echo -e "${RED}[ERROR]${NC} Health checks failed ($HEALTH_ERRORS)"
ERRORS=$((ERRORS+HEALTH_ERRORS))
echo -e "${RED}[ERROR]${NC} Health checks failed"
((ERRORS++))
fi
# ---------------------------------------------------------------
# SUMMARY
# ---------------------------------------------------------------
echo ""
echo "[STEP 8/9] Installation Summary"
echo -e "${CYAN}==============================================================${NC}"
echo -e "${CYAN}|${NC} INSTALLATION SUMMARY ${CYAN}|${NC}"
echo -e "${CYAN}==============================================================${NC}"
echo ""
echo " Install Dir: $TARGET_DIR"
echo " Architecture: $ARCH"
echo " Node.js: $NODE_VERSION"
echo " npm: $NPM_VERSION"
echo " Target: $TARGET_DIR"
echo " Mode: Binary-Free Mode"
echo " Errors: $ERRORS"
echo " Warnings: $WARNINGS"
echo " Log File: $LOG_FILE"
echo ""
echo "[STEP 9/9] Next steps"
if [[ $ERRORS -gt 0 ]]; then
echo -e "${RED}[RESULT]${NC} Installation completed with errors"
echo "Review $LOG_FILE for details."
echo -e "${RED}==============================================================${NC}"
echo -e "${RED} INSTALLATION FAILED${NC}"
echo -e "${RED}==============================================================${NC}"
echo "Check the log file: $LOG_FILE"
exit 1
else
echo -e "${GREEN}[RESULT]${NC} Installation completed successfully"
echo "Run: ./Launch-Unix.sh"
echo -e "${GREEN}==============================================================${NC}"
echo -e "${GREEN} INSTALLATION SUCCESSFUL!${NC}"
echo -e "${GREEN}==============================================================${NC}"
echo ""
echo "To start NomadArch, run:"
echo -e " ${BOLD}./Launch-Mac.sh${NC}"
echo ""
exit 0
fi
exit $ERRORS

View File

@@ -1,11 +1,16 @@
@echo off
REM NomadArch Windows Installer - ASCII Safe Version
REM This installer uses only ASCII characters for maximum compatibility
setlocal enabledelayedexpansion
title NomadArch Installer
title NomadArch Installer - Windows
echo.
echo NomadArch Installer (Windows)
echo Version: 0.4.0
echo ===============================================================
echo NomadArch Installer for Windows
echo Version: 0.6.1 - Universal Edition
echo ===============================================================
echo.
set SCRIPT_DIR=%~dp0
@@ -17,187 +22,251 @@ set TEMP_DIR=%TARGET_DIR%\.install-temp
set ERRORS=0
set WARNINGS=0
set NEEDS_FALLBACK=0
set SKIP_OPENCODE=1
set NODE_INSTALLED_NOW=0
echo [%date% %time%] Installer started >> "%LOG_FILE%"
echo [%date% %time%] ========== Installer started ========== >> "%LOG_FILE%"
echo [STEP 1/9] OS and Architecture Detection
wmic os get osarchitecture | findstr /i "64-bit" >nul
if %ERRORLEVEL% equ 0 (
REM ---------------------------------------------------------------
REM STEP 1: OS and Architecture Detection
REM ---------------------------------------------------------------
echo [STEP 1/8] Detecting System...
for /f "tokens=2 delims==" %%a in ('wmic os get osarchitecture /value 2^>nul ^| find "="') do set ARCH_RAW=%%a
if "!ARCH_RAW!"=="" set ARCH_RAW=64-bit
echo !ARCH_RAW! | findstr /i "64" >nul
if !ERRORLEVEL! equ 0 (
set ARCH=x64
) else (
set ARCH=x86
)
echo [OK] Architecture: %ARCH%
for /f "tokens=4-5 delims=. " %%i in ('ver') do set WIN_VER=%%i.%%j
echo [OK] Windows Version: !WIN_VER!
echo [OK] Architecture: !ARCH!
echo [%date% %time%] OS: Windows !WIN_VER!, Arch: !ARCH! >> "%LOG_FILE%"
REM ---------------------------------------------------------------
REM STEP 2: Check Write Permissions
REM ---------------------------------------------------------------
echo.
echo [STEP 2/9] Checking write permissions
if not exist "%BIN_DIR%" mkdir "%BIN_DIR%"
if not exist "%TEMP_DIR%" mkdir "%TEMP_DIR%"
echo [STEP 2/8] Checking Write Permissions...
echo. > "%SCRIPT_DIR%\test-write.tmp" 2>nul
if %ERRORLEVEL% neq 0 (
echo [WARN] Cannot write to current directory: %SCRIPT_DIR%
set TARGET_DIR=%USERPROFILE%\NomadArch-Install
set BIN_DIR=%TARGET_DIR%\bin
set LOG_FILE=%TARGET_DIR%\install.log
set TEMP_DIR=%TARGET_DIR%\.install-temp
if not exist "%TARGET_DIR%" mkdir "%TARGET_DIR%"
if not exist "%BIN_DIR%" mkdir "%BIN_DIR%"
if not exist "%TEMP_DIR%" mkdir "%TEMP_DIR%"
echo. > "%TARGET_DIR%\test-write.tmp" 2>nul
if %ERRORLEVEL% neq 0 (
echo [ERROR] Cannot write to fallback directory: %TARGET_DIR%
echo [%date% %time%] ERROR: Write permission denied >> "%LOG_FILE%"
set /a ERRORS+=1
goto :SUMMARY
)
del "%TARGET_DIR%\test-write.tmp"
set NEEDS_FALLBACK=1
echo [OK] Using fallback: %TARGET_DIR%
if not exist "%BIN_DIR%" mkdir "%BIN_DIR%" 2>nul
if not exist "%TEMP_DIR%" mkdir "%TEMP_DIR%" 2>nul
echo. > "%SCRIPT_DIR%\.write-test.tmp" 2>nul
if !ERRORLEVEL! neq 0 (
echo [WARN] Cannot write to: %SCRIPT_DIR%
echo [INFO] Using fallback location in user profile...
set TARGET_DIR=%USERPROFILE%\NomadArch
set BIN_DIR=!TARGET_DIR!\bin
set LOG_FILE=!TARGET_DIR!\install.log
set TEMP_DIR=!TARGET_DIR!\.install-temp
if not exist "!TARGET_DIR!" mkdir "!TARGET_DIR!"
if not exist "!BIN_DIR!" mkdir "!BIN_DIR!"
if not exist "!TEMP_DIR!" mkdir "!TEMP_DIR!"
echo [OK] Using fallback: !TARGET_DIR!
) else (
del "%SCRIPT_DIR%\test-write.tmp"
del "%SCRIPT_DIR%\.write-test.tmp" 2>nul
echo [OK] Write permissions verified
)
echo [%date% %time%] Install target: %TARGET_DIR% >> "%LOG_FILE%"
REM ---------------------------------------------------------------
REM STEP 3: Check and Install Node.js
REM ---------------------------------------------------------------
echo.
echo [STEP 3/9] Ensuring system dependencies
echo [STEP 3/8] Checking Node.js...
set WINGET_AVAILABLE=0
where winget >nul 2>&1 && set WINGET_AVAILABLE=1
set CHOCO_AVAILABLE=0
where choco >nul 2>&1 && set CHOCO_AVAILABLE=1
set DOWNLOAD_CMD=
where curl >nul 2>&1
if %ERRORLEVEL% equ 0 (
set DOWNLOAD_CMD=curl
) else (
set DOWNLOAD_CMD=powershell
)
set NODE_OK=0
set NPM_OK=0
where node >nul 2>&1
if %ERRORLEVEL% neq 0 (
echo [INFO] Node.js not found. Attempting to install...
if %WINGET_AVAILABLE% equ 1 (
winget install -e --id OpenJS.NodeJS.LTS --accept-source-agreements --accept-package-agreements
) else if %CHOCO_AVAILABLE% equ 1 (
choco install nodejs-lts -y
if !ERRORLEVEL! equ 0 (
for /f "tokens=*" %%v in ('node --version 2^>nul') do set NODE_VERSION=%%v
if defined NODE_VERSION (
echo [OK] Node.js found: !NODE_VERSION!
set NODE_OK=1
)
)
if !NODE_OK! equ 0 (
echo [INFO] Node.js not found. Attempting automatic installation...
echo [%date% %time%] Node.js not found, attempting install >> "%LOG_FILE%"
REM Try winget first (Windows 10 1709+)
where winget >nul 2>&1
if !ERRORLEVEL! equ 0 (
echo [INFO] Installing Node.js via winget...
winget install -e --id OpenJS.NodeJS.LTS --accept-source-agreements --accept-package-agreements --silent 2>nul
if !ERRORLEVEL! equ 0 (
set NODE_INSTALLED_NOW=1
echo [OK] Node.js installed via winget
) else (
echo [ERROR] No supported package manager found (winget/choco).
echo Please install Node.js LTS from https://nodejs.org/
echo [WARN] Winget install failed, trying alternative...
)
)
REM Try chocolatey if winget failed
if !NODE_INSTALLED_NOW! equ 0 (
where choco >nul 2>&1
if !ERRORLEVEL! equ 0 (
echo [INFO] Installing Node.js via Chocolatey...
choco install nodejs-lts -y 2>nul
if !ERRORLEVEL! equ 0 (
set NODE_INSTALLED_NOW=1
echo [OK] Node.js installed via Chocolatey
)
)
)
REM Direct download as last resort
if !NODE_INSTALLED_NOW! equ 0 (
echo [INFO] Downloading Node.js installer directly...
set NODE_INSTALLER=%TEMP_DIR%\node-installer.msi
REM Download using PowerShell with proper error handling
powershell -NoProfile -ExecutionPolicy Bypass -Command "$ProgressPreference = 'SilentlyContinue'; try { Invoke-WebRequest -Uri 'https://nodejs.org/dist/v20.10.0/node-v20.10.0-x64.msi' -OutFile '%TEMP_DIR%\node-installer.msi' -UseBasicParsing; exit 0 } catch { exit 1 }" 2>nul
if exist "%TEMP_DIR%\node-installer.msi" (
echo [INFO] Running Node.js installer...
msiexec /i "%TEMP_DIR%\node-installer.msi" /qn /norestart 2>nul
if !ERRORLEVEL! equ 0 (
set NODE_INSTALLED_NOW=1
echo [OK] Node.js installed successfully
) else (
echo [ERROR] Node.js MSI installation failed
)
del "%TEMP_DIR%\node-installer.msi" 2>nul
) else (
echo [ERROR] Failed to download Node.js installer
)
)
if !NODE_INSTALLED_NOW! equ 1 (
echo.
echo ===============================================================
echo IMPORTANT: Node.js was just installed!
echo Please CLOSE this window and run Install-Windows.bat again.
echo This is required for the PATH to update.
echo ===============================================================
echo.
echo [%date% %time%] Node.js installed, restart required >> "%LOG_FILE%"
pause
exit /b 0
) else (
echo.
echo [ERROR] Could not install Node.js automatically.
echo.
echo Please install Node.js manually:
echo 1. Go to https://nodejs.org/
echo 2. Download and install the LTS version
echo 3. Restart this installer
echo.
echo [%date% %time%] ERROR: Node.js installation failed >> "%LOG_FILE%"
set /a ERRORS+=1
goto :SUMMARY
)
)
where node >nul 2>&1
if %ERRORLEVEL% neq 0 (
echo [ERROR] Node.js install failed or requires a new terminal session.
set /a ERRORS+=1
goto :SUMMARY
)
for /f "tokens=*" %%i in ('node --version') do set NODE_VERSION=%%i
echo [OK] Node.js: %NODE_VERSION%
REM Verify npm
where npm >nul 2>&1
if %ERRORLEVEL% neq 0 (
echo [ERROR] npm not found after Node.js install.
if !ERRORLEVEL! equ 0 (
for /f "tokens=*" %%v in ('npm --version 2^>nul') do set NPM_VERSION=%%v
if defined NPM_VERSION (
echo [OK] npm found: !NPM_VERSION!
set NPM_OK=1
)
)
if !NPM_OK! equ 0 (
echo [ERROR] npm not found. This usually comes with Node.js.
echo [%date% %time%] ERROR: npm not found >> "%LOG_FILE%"
set /a ERRORS+=1
goto :SUMMARY
)
for /f "tokens=*" %%i in ('npm --version') do set NPM_VERSION=%%i
echo [OK] npm: %NPM_VERSION%
REM ---------------------------------------------------------------
REM STEP 4: Check Git (optional)
REM ---------------------------------------------------------------
echo.
echo [STEP 4/8] Checking Git (optional)...
where git >nul 2>&1
if %ERRORLEVEL% neq 0 (
echo [INFO] Git not found. Attempting to install...
if %WINGET_AVAILABLE% equ 1 (
winget install -e --id Git.Git --accept-source-agreements --accept-package-agreements
) else if %CHOCO_AVAILABLE% equ 1 (
choco install git -y
) else (
echo [WARN] Git not installed (optional). Continue.
set /a WARNINGS+=1
)
if !ERRORLEVEL! equ 0 (
for /f "tokens=*" %%v in ('git --version 2^>nul') do set GIT_VERSION=%%v
echo [OK] !GIT_VERSION!
) else (
for /f "tokens=*" %%i in ('git --version') do set GIT_VERSION=%%i
echo [OK] Git: %GIT_VERSION%
echo [INFO] Git not found (optional - not required for basic usage)
set /a WARNINGS+=1
)
REM ---------------------------------------------------------------
REM STEP 5: Install npm Dependencies
REM ---------------------------------------------------------------
echo.
echo [STEP 4/9] Installing npm dependencies
echo [STEP 5/8] Installing Dependencies...
cd /d "%SCRIPT_DIR%"
if not exist "package.json" (
echo [ERROR] package.json not found in %SCRIPT_DIR%
echo [ERROR] Make sure you extracted the full NomadArch package.
echo [%date% %time%] ERROR: package.json missing >> "%LOG_FILE%"
set /a ERRORS+=1
goto :SUMMARY
)
echo [INFO] Running npm install (this may take a few minutes)...
echo [%date% %time%] Running npm install >> "%LOG_FILE%"
call npm install
if %ERRORLEVEL% neq 0 (
call npm install --no-audit --no-fund 2>&1
if !ERRORLEVEL! neq 0 (
echo [WARN] npm install had issues, trying with legacy peer deps...
call npm install --legacy-peer-deps --no-audit --no-fund 2>&1
if !ERRORLEVEL! neq 0 (
echo [ERROR] npm install failed!
echo [%date% %time%] ERROR: npm install failed >> "%LOG_FILE%"
set /a ERRORS+=1
goto :SUMMARY
)
)
echo [OK] Dependencies installed
REM ---------------------------------------------------------------
REM STEP 6: OpenCode Binary (OPTIONAL)
REM ---------------------------------------------------------------
echo.
echo [STEP 5/9] Fetching OpenCode binary
if not exist "%BIN_DIR%" mkdir "%BIN_DIR%"
for /f "delims=" %%v in ('powershell -NoProfile -Command "(Invoke-WebRequest -UseBasicParsing https://api.github.com/repos/sst/opencode/releases/latest).Content ^| Select-String -Pattern '""tag_name""' ^| ForEach-Object { $_.Line.Split(''\"'')[3] }"') do (
set OPENCODE_VERSION=%%v
)
set OPENCODE_BASE=https://github.com/sst/opencode/releases/download/v!OPENCODE_VERSION!
set OPENCODE_URL=!OPENCODE_BASE!/opencode-windows-%ARCH%.exe
set CHECKSUM_URL=!OPENCODE_BASE!/checksums.txt
if exist "%BIN_DIR%\opencode.exe" (
echo [OK] OpenCode binary already exists
echo [%date% %time%] OpenCode binary exists, skipping download >> "%LOG_FILE%"
) else (
echo [INFO] Downloading OpenCode v!OPENCODE_VERSION!...
if "%DOWNLOAD_CMD%"=="curl" (
curl -L -o "%BIN_DIR%\opencode.exe.tmp" "!OPENCODE_URL!"
curl -L -o "%BIN_DIR%\checksums.txt" "!CHECKSUM_URL!"
) else (
powershell -NoProfile -Command "Invoke-WebRequest -Uri '%OPENCODE_URL%' -OutFile '%BIN_DIR%\\opencode.exe.tmp'"
powershell -NoProfile -Command "Invoke-WebRequest -Uri '%CHECKSUM_URL%' -OutFile '%BIN_DIR%\\checksums.txt'"
)
set EXPECTED_HASH=
for /f "tokens=1,2" %%h in ('type "%BIN_DIR%\checksums.txt" ^| findstr /i "opencode-windows-%ARCH%"') do (
set EXPECTED_HASH=%%h
)
set ACTUAL_HASH=
for /f "skip=1 tokens=*" %%h in ('certutil -hashfile "%BIN_DIR%\opencode.exe.tmp" SHA256 ^| findstr /v "CertUtil" ^| findstr /v "hash of"') do (
set ACTUAL_HASH=%%h
goto :hash_found
)
:hash_found
if "!ACTUAL_HASH!"=="!EXPECTED_HASH!" (
move /Y "%BIN_DIR%\opencode.exe.tmp" "%BIN_DIR%\opencode.exe"
echo [OK] OpenCode downloaded and verified
) else (
echo [ERROR] OpenCode checksum mismatch!
del "%BIN_DIR%\opencode.exe.tmp"
set /a ERRORS+=1
)
)
echo [STEP 6/8] OpenCode Binary Setup...
echo.
echo [STEP 6/9] Building UI assets
echo ===============================================================
echo NomadArch supports Binary-Free Mode!
echo You can skip the OpenCode binary and use free cloud models:
echo - GPT-5 Nano, Grok Code, GLM-4.7, Doubao, and more
echo ===============================================================
echo.
set SKIP_OPENCODE=1
echo [OK] Using Binary-Free Mode (default)
echo [%date% %time%] Using Binary-Free Mode >> "%LOG_FILE%"
REM ---------------------------------------------------------------
REM STEP 7: Build UI Assets
REM ---------------------------------------------------------------
echo.
echo [STEP 7/8] Building UI Assets...
if exist "%SCRIPT_DIR%\packages\ui\dist\index.html" (
echo [OK] UI build already exists
) else (
echo [INFO] Building UI assets...
pushd packages\ui
call npm run build
if %ERRORLEVEL% neq 0 (
echo [INFO] Building UI (this may take 1-2 minutes)...
pushd "%SCRIPT_DIR%\packages\ui"
call npm run build 2>&1
if !ERRORLEVEL! neq 0 (
echo [ERROR] UI build failed!
echo [%date% %time%] ERROR: UI build failed >> "%LOG_FILE%"
popd
set /a ERRORS+=1
goto :SUMMARY
@@ -206,48 +275,95 @@ if exist "%SCRIPT_DIR%\packages\ui\dist\index.html" (
echo [OK] UI assets built successfully
)
REM ---------------------------------------------------------------
REM STEP 8: Health Check and Summary
REM ---------------------------------------------------------------
echo.
echo [STEP 7/9] Post-install health check
set HEALTH_ERRORS=0
echo [STEP 8/8] Running Health Check...
if not exist "%SCRIPT_DIR%\package.json" set /a HEALTH_ERRORS+=1
if not exist "%SCRIPT_DIR%\packages\ui" set /a HEALTH_ERRORS+=1
if not exist "%SCRIPT_DIR%\packages\server" set /a HEALTH_ERRORS+=1
if not exist "%SCRIPT_DIR%\packages\ui\dist\index.html" set /a HEALTH_ERRORS+=1
set HEALTH_OK=1
if %HEALTH_ERRORS% equ 0 (
echo [OK] Health checks passed
) else (
echo [ERROR] Health checks failed (%HEALTH_ERRORS%)
set /a ERRORS+=%HEALTH_ERRORS%
if not exist "%SCRIPT_DIR%\package.json" (
echo [FAIL] package.json missing
set HEALTH_OK=0
)
if not exist "%SCRIPT_DIR%\packages\ui" (
echo [FAIL] packages\ui directory missing
set HEALTH_OK=0
)
if not exist "%SCRIPT_DIR%\packages\server" (
echo [FAIL] packages\server directory missing
set HEALTH_OK=0
)
if not exist "%SCRIPT_DIR%\packages\ui\dist\index.html" (
echo [FAIL] UI build missing (packages\ui\dist\index.html)
set HEALTH_OK=0
)
if not exist "%SCRIPT_DIR%\node_modules" (
echo [FAIL] node_modules directory missing
set HEALTH_OK=0
)
if !HEALTH_OK! equ 1 (
echo [OK] All health checks passed
) else (
echo [ERROR] Health checks failed
set /a ERRORS+=1
)
:SUMMARY
echo.
echo [STEP 8/9] Installation Summary
echo ===============================================================
echo INSTALLATION SUMMARY
echo ===============================================================
echo.
echo Install Dir: %TARGET_DIR%
echo Architecture: %ARCH%
echo Node.js: %NODE_VERSION%
echo npm: %NPM_VERSION%
echo Errors: %ERRORS%
echo Warnings: %WARNINGS%
echo Install Directory: %TARGET_DIR%
echo Architecture: !ARCH!
if defined NODE_VERSION echo Node.js: !NODE_VERSION!
if defined NPM_VERSION echo npm: !NPM_VERSION!
echo Mode: Binary-Free Mode
echo Errors: !ERRORS!
echo Warnings: !WARNINGS!
echo Log File: %LOG_FILE%
echo.
echo [STEP 9/9] Next steps
:SUMMARY
if %ERRORS% gtr 0 (
echo [RESULT] Installation completed with errors.
echo Review the log: %LOG_FILE%
if !ERRORS! gtr 0 (
echo ===============================================================
echo INSTALLATION FAILED
echo ===============================================================
echo.
echo If Node.js was just installed, open a new terminal and run this installer again.
echo Review the errors above and check the log file: %LOG_FILE%
echo.
echo Common fixes:
echo 1. Run as Administrator (right-click, Run as administrator)
echo 2. Ensure internet connection is stable
echo 3. Disable antivirus temporarily
echo 4. Install Node.js manually from https://nodejs.org/
echo.
echo [%date% %time%] Installation FAILED with !ERRORS! errors >> "%LOG_FILE%"
) else (
echo [RESULT] Installation completed successfully.
echo Run Launch-Windows.bat to start the application.
echo ===============================================================
echo INSTALLATION SUCCESSFUL!
echo ===============================================================
echo.
echo To start NomadArch, run:
echo Launch-Windows.bat
echo.
echo Available Free Models:
echo - GPT-5 Nano (fast)
echo - Grok Code (coding)
echo - GLM-4.7 (general)
echo - Doubao (creative)
echo - Big Pickle (experimental)
echo.
echo [%date% %time%] Installation SUCCESSFUL >> "%LOG_FILE%"
)
echo.
echo Press any key to exit...
pause >nul
exit /b %ERRORS%
exit /b !ERRORS!

View File

@@ -66,17 +66,49 @@ SERVER_PORT=$DEFAULT_SERVER_PORT
UI_PORT=$DEFAULT_UI_PORT
for port in {3001..3050}; do
# Try lsof first, then ss, then netstat
if command -v lsof &> /dev/null; then
if ! lsof -i :$port -sTCP:LISTEN -t > /dev/null 2>&1; then
SERVER_PORT=$port
break
fi
elif command -v ss &> /dev/null; then
if ! ss -tuln | grep -q ":$port "; then
SERVER_PORT=$port
break
fi
elif command -v netstat &> /dev/null; then
if ! netstat -tuln | grep -q ":$port "; then
SERVER_PORT=$port
break
fi
else
SERVER_PORT=$port
break
fi
done
for port in {3000..3050}; do
# Try lsof first, then ss, then netstat
if command -v lsof &> /dev/null; then
if ! lsof -i :$port -sTCP:LISTEN -t > /dev/null 2>&1; then
UI_PORT=$port
break
fi
elif command -v ss &> /dev/null; then
if ! ss -tuln | grep -q ":$port "; then
UI_PORT=$port
break
fi
elif command -v netstat &> /dev/null; then
if ! netstat -tuln | grep -q ":$port "; then
UI_PORT=$port
break
fi
else
UI_PORT=$port
break
fi
done
echo -e "${GREEN}[OK]${NC} Server port: $SERVER_PORT"

View File

@@ -6,7 +6,7 @@ color 0B
echo.
echo NomadArch Development Launcher (Windows)
echo Version: 0.4.0
echo Version: 0.5.0 - Binary-Free Mode
echo.
set SCRIPT_DIR=%~dp0
@@ -16,6 +16,7 @@ cd /d "%SCRIPT_DIR%"
set ERRORS=0
set WARNINGS=0
set AUTO_FIXED=0
set BINARY_FREE_MODE=0
echo [PREFLIGHT 1/7] Checking Dependencies...
@@ -46,29 +47,33 @@ echo [PREFLIGHT 2/7] Checking for OpenCode CLI...
where opencode >nul 2>&1
if %ERRORLEVEL% equ 0 (
echo [OK] OpenCode CLI available in PATH
) else (
if exist "bin\opencode.exe" (
echo [OK] OpenCode binary found in bin/
) else (
echo [WARN] OpenCode CLI not found
echo [INFO] Run Install-Windows.bat to set up OpenCode
set /a WARNINGS+=1
)
echo [OK] OpenCode CLI in PATH - Full Mode
goto :opencode_check_done
)
if exist "bin\opencode.exe" (
echo [OK] OpenCode binary in bin/ - Full Mode
goto :opencode_check_done
)
echo [INFO] OpenCode CLI not found - Using Binary-Free Mode
echo [INFO] Free models: GPT-5 Nano, Grok Code, GLM-4.7 via OpenCode Zen
set BINARY_FREE_MODE=1
:opencode_check_done
echo.
echo [PREFLIGHT 3/7] Checking Dependencies...
if not exist "node_modules" (
echo [INFO] Dependencies not installed. Installing now...
call npm install
if %ERRORLEVEL% neq 0 (
if !ERRORLEVEL! neq 0 (
echo [ERROR] Dependency installation failed!
pause
exit /b 1
)
echo [OK] Dependencies installed (auto-fix)
echo [OK] Dependencies installed
set /a AUTO_FIXED+=1
) else (
echo [OK] Dependencies found
@@ -111,14 +116,14 @@ if not exist "packages\ui\dist\index.html" (
echo [INFO] Running UI build...
pushd packages\ui
call npm run build
if %ERRORLEVEL% neq 0 (
if !ERRORLEVEL! neq 0 (
echo [ERROR] UI build failed!
popd
set /a ERRORS+=1
goto :launch_check
)
popd
echo [OK] UI build completed (auto-fix)
echo [OK] UI build completed
set /a AUTO_FIXED+=1
)
@@ -126,12 +131,12 @@ if not exist "packages\electron-app\dist\main\main.js" (
echo [WARN] Electron build incomplete
echo [INFO] Running full build...
call npm run build
if %ERRORLEVEL% neq 0 (
if !ERRORLEVEL! neq 0 (
echo [ERROR] Full build failed!
set /a ERRORS+=1
goto :launch_check
)
echo [OK] Full build completed (auto-fix)
echo [OK] Full build completed
set /a AUTO_FIXED+=1
)
@@ -142,14 +147,19 @@ echo [STATUS]
echo.
echo Node.js: %NODE_VERSION%
echo npm: %NPM_VERSION%
if !BINARY_FREE_MODE! equ 1 (
echo Mode: Binary-Free Mode
) else (
echo Mode: Full Mode with OpenCode
)
echo Auto-fixes applied: !AUTO_FIXED!
echo Warnings: %WARNINGS%
echo Errors: %ERRORS%
echo Warnings: !WARNINGS!
echo Errors: !ERRORS!
echo Server Port: !SERVER_PORT!
echo UI Port: !UI_PORT!
echo.
if %ERRORS% gtr 0 (
if !ERRORS! gtr 0 (
echo [RESULT] Cannot start due to errors!
pause
exit /b 1
@@ -179,4 +189,4 @@ taskkill /F /FI "WINDOWTITLE eq NomadArch Electron*" >nul 2>&1
:launch_check
pause
exit /b %ERRORS%
exit /b !ERRORS!

62
Launch-Unix-Prod.sh Normal file
View File

@@ -0,0 +1,62 @@
#!/bin/bash
# NomadArch Production Launcher for macOS and Linux
# Version: 0.4.0
#
# Launches the pre-built Electron app (no dev servers). Requires Node.js
# and previously built UI assets (produced by `npm run build`).
set -euo pipefail

# ANSI color codes for status output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# Resolve the directory containing this script and work from there, so the
# launcher behaves identically regardless of the caller's current directory.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$SCRIPT_DIR"

echo ""
echo "NomadArch Launcher (macOS/Linux, Production Mode)"
echo "Version: 0.4.0"
echo "Features: SMART FIX / APEX / SHIELD / MULTIX MODE"
echo ""

echo "[STEP 1/3] Checking Dependencies..."
if ! command -v node &> /dev/null; then
    echo -e "${RED}[ERROR]${NC} Node.js not found!"
    echo "Please run the installer first:"
    if [[ "$OSTYPE" == "darwin"* ]]; then
        echo "  ./Install-Mac.sh"
    else
        echo "  ./Install-Linux.sh"
    fi
    exit 1
fi
NODE_VERSION=$(node --version)
echo -e "${GREEN}[OK]${NC} Node.js: $NODE_VERSION"

echo ""
echo "[STEP 2/3] Checking Pre-Built UI..."
if [[ -d "packages/electron-app/dist/renderer/assets" ]]; then
    echo -e "${GREEN}[OK]${NC} Pre-built UI assets found"
else
    echo -e "${RED}[ERROR]${NC} Pre-built UI assets not found."
    echo "Run: npm run build"
    exit 1
fi

echo ""
echo "[STEP 3/3] Starting NomadArch (Production Mode)..."
cd packages/electron-app
# BUG FIX: under `set -e`, a bare `npx electron .` followed by
# `EXIT_CODE=$?` would terminate the whole script the moment electron
# returned non-zero, making the error message below unreachable.
# Capturing the status with `|| EXIT_CODE=$?` keeps control in the script
# while preserving the exit code for the final `exit`.
EXIT_CODE=0
npx electron . || EXIT_CODE=$?
if [[ $EXIT_CODE -ne 0 ]]; then
    echo ""
    echo -e "${RED}[ERROR]${NC} NomadArch exited with an error!"
fi
exit $EXIT_CODE

View File

@@ -1,7 +1,7 @@
#!/bin/bash
# NomadArch Launcher for macOS and Linux
# Version: 0.4.0
# Version: 0.5.0 - Binary-Free Mode
set -euo pipefail
@@ -17,10 +17,11 @@ cd "$SCRIPT_DIR"
ERRORS=0
WARNINGS=0
AUTO_FIXED=0
BINARY_FREE_MODE=0
echo ""
echo "NomadArch Launcher (macOS/Linux)"
echo "Version: 0.4.0"
echo "Version: 0.5.0 - Binary-Free Mode"
echo ""
echo "[PREFLIGHT 1/7] Checking Dependencies..."
@@ -48,16 +49,16 @@ NPM_VERSION=$(npm --version)
echo -e "${GREEN}[OK]${NC} npm: $NPM_VERSION"
echo ""
echo "[PREFLIGHT 2/7] Checking for OpenCode CLI..."
echo "[PREFLIGHT 2/7] Checking for OpenCode CLI (Optional)..."
if command -v opencode &> /dev/null; then
echo -e "${GREEN}[OK]${NC} OpenCode CLI available in PATH"
echo -e "${GREEN}[OK]${NC} OpenCode CLI available in PATH - Full Mode"
elif [[ -f "$SCRIPT_DIR/bin/opencode" ]]; then
echo -e "${GREEN}[OK]${NC} OpenCode binary found in bin/"
echo -e "${GREEN}[OK]${NC} OpenCode binary found in bin/ - Full Mode"
else
echo -e "${YELLOW}[WARN]${NC} OpenCode CLI not found"
echo "[INFO] Run Install-*.sh to set up OpenCode"
((WARNINGS++))
echo -e "${BLUE}[INFO]${NC} OpenCode CLI not found - Using Binary-Free Mode"
echo -e "${BLUE}[INFO]${NC} Free models (GPT-5 Nano, Grok Code, GLM-4.7) available via OpenCode Zen"
BINARY_FREE_MODE=1
fi
echo ""
@@ -84,10 +85,27 @@ SERVER_PORT=$DEFAULT_SERVER_PORT
UI_PORT=$DEFAULT_UI_PORT
for port in {3001..3050}; do
# Try lsof first, then ss, then netstat
if command -v lsof &> /dev/null; then
if ! lsof -i :$port -sTCP:LISTEN -t > /dev/null 2>&1; then
SERVER_PORT=$port
break
fi
elif command -v ss &> /dev/null; then
if ! ss -tuln | grep -q ":$port "; then
SERVER_PORT=$port
break
fi
elif command -v netstat &> /dev/null; then
if ! netstat -tuln | grep -q ":$port "; then
SERVER_PORT=$port
break
fi
else
# No port checking tools, just use default
SERVER_PORT=$port
break
fi
done
echo -e "${GREEN}[OK]${NC} Server port: $SERVER_PORT"
@@ -133,6 +151,12 @@ echo -e "${BLUE}[STATUS]${NC}"
echo ""
echo " Node.js: $NODE_VERSION"
echo " npm: $NPM_VERSION"
if [[ $BINARY_FREE_MODE -eq 1 ]]; then
echo " Mode: Binary-Free Mode (No OpenCode binary required)"
echo " Free Models: GPT-5 Nano, Grok Code, GLM-4.7, Doubao, Big Pickle"
else
echo " Mode: Full Mode (OpenCode binary available)"
fi
echo " Auto-fixes applied: $AUTO_FIXED"
echo " Warnings: $WARNINGS"
echo " Errors: $ERRORS"
@@ -158,6 +182,7 @@ elif [[ "$OSTYPE" == "linux-gnu"* ]]; then
fi
export CLI_PORT=$SERVER_PORT
export NOMADARCH_BINARY_FREE_MODE=$BINARY_FREE_MODE
npm run dev:electron
EXIT_CODE=$?

View File

@@ -6,7 +6,7 @@ color 0A
echo.
echo NomadArch Launcher (Windows)
echo Version: 0.4.0
echo Version: 0.5.0 - Binary-Free Mode
echo.
set SCRIPT_DIR=%~dp0
@@ -16,6 +16,7 @@ cd /d "%SCRIPT_DIR%"
set ERRORS=0
set WARNINGS=0
set AUTO_FIXED=0
set BINARY_FREE_MODE=0
echo [PREFLIGHT 1/7] Checking Dependencies...
@@ -42,33 +43,37 @@ for /f "tokens=*" %%i in ('npm --version') do set NPM_VERSION=%%i
echo [OK] npm: %NPM_VERSION%
echo.
echo [PREFLIGHT 2/7] Checking for OpenCode CLI...
echo [PREFLIGHT 2/7] Checking OpenCode CLI...
where opencode >nul 2>&1
if %ERRORLEVEL% equ 0 (
echo [OK] OpenCode CLI available in PATH
) else (
if exist "bin\opencode.exe" (
echo [OK] OpenCode binary found in bin/
) else (
echo [WARN] OpenCode CLI not found
echo [INFO] Run Install-Windows.bat to set up OpenCode
set /a WARNINGS+=1
)
echo [OK] OpenCode CLI in PATH - Full Mode
goto :opencode_check_done
)
if exist "bin\opencode.exe" (
echo [OK] OpenCode binary in bin/ - Full Mode
goto :opencode_check_done
)
echo [INFO] OpenCode CLI not found - Using Binary-Free Mode
echo [INFO] Free models: GPT-5 Nano, Grok Code, GLM-4.7 via OpenCode Zen
set BINARY_FREE_MODE=1
:opencode_check_done
echo.
echo [PREFLIGHT 3/7] Checking Dependencies...
if not exist "node_modules" (
echo [INFO] Dependencies not installed. Installing now...
call npm install
if %ERRORLEVEL% neq 0 (
if !ERRORLEVEL! neq 0 (
echo [ERROR] Dependency installation failed!
pause
exit /b 1
)
echo [OK] Dependencies installed (auto-fix)
echo [OK] Dependencies installed
set /a AUTO_FIXED+=1
) else (
echo [OK] Dependencies found
@@ -106,14 +111,14 @@ if not exist "packages\ui\dist\index.html" (
echo [INFO] Running UI build...
pushd packages\ui
call npm run build
if %ERRORLEVEL% neq 0 (
if !ERRORLEVEL! neq 0 (
echo [ERROR] UI build failed!
popd
set /a ERRORS+=1
goto :final_launch_check
)
popd
echo [OK] UI build completed (auto-fix)
echo [OK] UI build completed
set /a AUTO_FIXED+=1
) else (
echo [OK] UI build directory exists
@@ -123,12 +128,12 @@ if not exist "packages\electron-app\dist\main\main.js" (
echo [WARN] Electron build incomplete
echo [INFO] Running full build...
call npm run build
if %ERRORLEVEL% neq 0 (
if !ERRORLEVEL! neq 0 (
echo [ERROR] Full build failed!
set /a ERRORS+=1
goto :final_launch_check
)
echo [OK] Full build completed (auto-fix)
echo [OK] Full build completed
set /a AUTO_FIXED+=1
)
@@ -139,13 +144,19 @@ echo [STATUS]
echo.
echo Node.js: %NODE_VERSION%
echo npm: %NPM_VERSION%
if !BINARY_FREE_MODE! equ 1 (
echo Mode: Binary-Free Mode
echo Free Models: GPT-5 Nano, Grok Code, GLM-4.7, Doubao, Big Pickle
) else (
echo Mode: Full Mode with OpenCode
)
echo Auto-fixes applied: !AUTO_FIXED!
echo Warnings: %WARNINGS%
echo Errors: %ERRORS%
echo Warnings: !WARNINGS!
echo Errors: !ERRORS!
echo Server Port: !SERVER_PORT!
echo.
if %ERRORS% gtr 0 (
if !ERRORS! gtr 0 (
echo [RESULT] Cannot start due to errors!
echo.
echo Please fix the errors above and try again.
@@ -181,13 +192,14 @@ echo ========================================
set "VITE_DEV_SERVER_URL=http://localhost:!UI_PORT!"
set "NOMADARCH_OPEN_DEVTOOLS=false"
set "NOMADARCH_BINARY_FREE_MODE=!BINARY_FREE_MODE!"
call npm run dev:electron
if %ERRORLEVEL% neq 0 (
if !ERRORLEVEL! neq 0 (
echo.
echo [ERROR] NomadArch exited with an error!
echo.
echo Error Code: %ERRORLEVEL%
echo Error Code: !ERRORLEVEL!
echo.
echo Troubleshooting:
echo 1. Ensure port !SERVER_PORT! is not in use
@@ -200,4 +212,4 @@ if %ERRORLEVEL% neq 0 (
echo.
echo Press any key to exit...
pause >nul
exit /b %ERRORS%
exit /b !ERRORS!

152
Prepare-Public-Release.bat Normal file
View File

@@ -0,0 +1,152 @@
@echo off
setlocal enabledelayedexpansion
:: =====================================================
:: NomadArch - Clean Copy Script for Public Release
:: Creates a sanitized copy without sensitive data
:: (logs, .opencode/.trae/.backup state, backup dirs).
:: =====================================================
title NomadArch Clean Copy for GitHub
echo.
echo =====================================================
echo NomadArch - Prepare Clean Public Release
echo =====================================================
echo.
:: %~dp0 always ends with a trailing backslash; strip it so paths
:: like "%SCRIPT_DIR%\package.json" do not contain a double slash.
set SCRIPT_DIR=%~dp0
set SCRIPT_DIR=%SCRIPT_DIR:~0,-1%
set DEST_DIR=%USERPROFILE%\Desktop\NomadArch-Public-Release
echo [INFO] Source: %SCRIPT_DIR%
echo [INFO] Destination: %DEST_DIR%
echo.
:: Start from a clean slate: a stale destination would mix old and new
:: files. NOTE(review): rmdir can fail silently if files are locked
:: (e.g. open in an editor); the following mkdir would then fail too.
if exist "%DEST_DIR%" (
echo [WARN] Destination exists. Removing old copy...
rmdir /s /q "%DEST_DIR%"
)
echo [STEP 1/6] Creating destination directory...
mkdir "%DEST_DIR%"
echo [STEP 2/6] Copying core project files...
:: Copy essential files
:: >nul hides copy's per-file output; a missing optional file (e.g.
:: BUILD.md) is silently skipped rather than aborting the run.
copy "%SCRIPT_DIR%\package.json" "%DEST_DIR%\" >nul
copy "%SCRIPT_DIR%\package-lock.json" "%DEST_DIR%\" >nul
copy "%SCRIPT_DIR%\.gitignore" "%DEST_DIR%\" >nul
copy "%SCRIPT_DIR%\README.md" "%DEST_DIR%\" >nul
copy "%SCRIPT_DIR%\BUILD.md" "%DEST_DIR%\" >nul
copy "%SCRIPT_DIR%\AGENTS.md" "%DEST_DIR%\" >nul
copy "%SCRIPT_DIR%\PROGRESS.md" "%DEST_DIR%\" >nul
copy "%SCRIPT_DIR%\manual_test_guide.md" "%DEST_DIR%\" >nul
:: Copy launchers and installers
copy "%SCRIPT_DIR%\Install-*.bat" "%DEST_DIR%\" >nul
copy "%SCRIPT_DIR%\Install-*.sh" "%DEST_DIR%\" >nul
copy "%SCRIPT_DIR%\Launch-*.bat" "%DEST_DIR%\" >nul
copy "%SCRIPT_DIR%\Launch-*.sh" "%DEST_DIR%\" >nul
echo [STEP 3/6] Copying packages directory (source only)...
:: Use robocopy to exclude unwanted items
:: /E = recurse incl. empty dirs; /XD = skip build/output dirs;
:: /XF = skip logs/backups/source maps; the /N* flags suppress the
:: per-file progress listing. NOTE(review): robocopy exit codes 0-7
:: mean success; >=8 means failure, which is not checked here.
robocopy "%SCRIPT_DIR%\packages" "%DEST_DIR%\packages" /E /NFL /NDL /NJH /NJS /NC /NS ^
 /XD node_modules dist out release .vite .electron-vite _backup_original __pycache__ ^
 /XF *.log *.bak *.tmp *.map
echo [STEP 4/6] Copying additional directories...
:: Copy docs if exists
if exist "%SCRIPT_DIR%\docs" (
robocopy "%SCRIPT_DIR%\docs" "%DEST_DIR%\docs" /E /NFL /NDL /NJH /NJS /NC /NS /XD node_modules
)
:: Copy images if exists
if exist "%SCRIPT_DIR%\images" (
robocopy "%SCRIPT_DIR%\images" "%DEST_DIR%\images" /E /NFL /NDL /NJH /NJS /NC /NS
)
:: Copy dev-docs if exists
if exist "%SCRIPT_DIR%\dev-docs" (
robocopy "%SCRIPT_DIR%\dev-docs" "%DEST_DIR%\dev-docs" /E /NFL /NDL /NJH /NJS /NC /NS
)
:: Copy scripts directory if exists
if exist "%SCRIPT_DIR%\scripts" (
robocopy "%SCRIPT_DIR%\scripts" "%DEST_DIR%\scripts" /E /NFL /NDL /NJH /NJS /NC /NS
)
:: Copy .github directory (workflows, templates)
if exist "%SCRIPT_DIR%\.github" (
robocopy "%SCRIPT_DIR%\.github" "%DEST_DIR%\.github" /E /NFL /NDL /NJH /NJS /NC /NS
)
echo [STEP 5/6] Creating clean MCP config template...
:: Create a template .mcp.json with placeholders
:: The parenthesized echo block writes the whole JSON template in one
:: redirection; the real .mcp.json (which may hold API keys) is never
:: copied, only this *.example with a placeholder token.
(
echo {
echo   "mcpServers": {
echo     "sequential-thinking": {
echo       "command": "npx",
echo       "args": ["-y", "@modelcontextprotocol/server-sequential-thinking"]
echo     },
echo     "desktop-commander": {
echo       "command": "npx",
echo       "args": ["-y", "@modelcontextprotocol/server-desktop-commander"]
echo     },
echo     "web-reader": {
echo       "command": "npx",
echo       "args": ["-y", "@modelcontextprotocol/server-web-reader"]
echo     },
echo     "github": {
echo       "command": "npx",
echo       "args": ["-y", "@modelcontextprotocol/server-github"],
echo       "env": {
echo         "GITHUB_TOKEN": "YOUR_GITHUB_TOKEN_HERE"
echo       }
echo     }
echo   }
echo }
) > "%DEST_DIR%\.mcp.json.example"
echo [STEP 6/6] Final cleanup...
:: Remove any accidentally copied sensitive files
:: Belt-and-braces pass: delete session/user-state dirs and log/backup
:: files even if an earlier copy step let them through.
if exist "%DEST_DIR%\.opencode" rmdir /s /q "%DEST_DIR%\.opencode"
if exist "%DEST_DIR%\.trae" rmdir /s /q "%DEST_DIR%\.trae"
if exist "%DEST_DIR%\.backup" rmdir /s /q "%DEST_DIR%\.backup"
if exist "%DEST_DIR%\.tmp-qwen-code" rmdir /s /q "%DEST_DIR%\.tmp-qwen-code"
if exist "%DEST_DIR%\.agent" rmdir /s /q "%DEST_DIR%\.agent"
if exist "%DEST_DIR%\install.log" del "%DEST_DIR%\install.log"
:: Delete any .bak files that got through
for /r "%DEST_DIR%" %%f in (*.bak) do del "%%f" 2>nul
for /r "%DEST_DIR%" %%f in (*.log) do del "%%f" 2>nul
for /r "%DEST_DIR%" %%f in (*.tmp) do del "%%f" 2>nul
:: Remove _backup_original directories
:: for /d /r visits every directory and appends the pattern, so the
:: "if exist" guard filters to directories that actually exist.
for /d /r "%DEST_DIR%" %%d in (_backup_original) do (
if exist "%%d" rmdir /s /q "%%d"
)
echo.
echo =====================================================
echo Clean Copy Complete!
echo =====================================================
echo.
echo Location: %DEST_DIR%
echo.
echo Next Steps:
echo   1. Review the contents of %DEST_DIR%
echo   2. cd %DEST_DIR%
echo   3. git init
echo   4. git add .
echo   5. git commit -m "Initial public release"
echo   6. git remote add origin https://github.com/YOUR_USER/NomadArch.git
echo   7. git push -u origin main
echo.
echo =====================================================
pause

566
README.md
View File

@@ -1,565 +1,347 @@
<!--
NomadArch - Advanced AI Coding Workspace
SEO Optimized: AI coding assistant, multi-model support, GLM 4.7, Z.AI API, autonomous coding, TypeScript, Electron
-->
<meta name="description" content="NomadArch - Advanced AI-powered coding workspace with multi-model support including GLM 4.7, Anthropic Claude, OpenAI GPT, and local Ollama models. Autonomous coding, real-time streaming, and intelligent code fixes.">
<meta name="keywords" content="AI coding assistant, GLM 4.7, Z.AI API, multi-model AI, autonomous coding, code generation, TypeScript, Electron, SolidJS, OpenAI, Anthropic, Qwen, Ollama">
<meta name="author" content="NeuralNomadsAI">
<meta name="robots" content="index, follow">
<meta property="og:title" content="NomadArch - Advanced AI Coding Workspace with GLM 4.7">
<meta property="og:description" content="Multi-model AI coding assistant featuring GLM 4.7, Claude, GPT, and local models. Autonomous coding, real-time streaming, intelligent fixes.">
<meta property="og:image" content="https://github.com/roman-ryzenadvanced/NomadArch-v1.0/raw/main/packages/ui/src/images/CodeNomad-Icon.png">
<meta property="og:type" content="website">
<meta property="og:url" content="https://github.com/roman-ryzenadvanced/NomadArch-v1.0">
<meta name="twitter:card" content="summary_large_image">
<meta name="twitter:title" content="NomadArch - Advanced AI Coding Workspace">
<meta name="twitter:description" content="Multi-model AI coding assistant featuring GLM 4.7, Claude, GPT, and local models.">
<meta name="twitter:image" content="https://github.com/roman-ryzenadvanced/NomadArch-v1.0/raw/main/packages/ui/src/images/CodeNomad-Icon.png">
<script type="application/ld+json">
{
"@context": "https://schema.org",
"@type": "SoftwareApplication",
"name": "NomadArch",
"operatingSystem": "Windows, macOS, Linux",
"applicationCategory": "DeveloperApplication",
"description": "Advanced AI-powered coding workspace with multi-model support including GLM 4.7, Anthropic Claude, OpenAI GPT, and local Ollama models",
"author": {
"@type": "Organization",
"name": "NeuralNomadsAI"
},
"offers": {
"@type": "Offer",
"price": "0",
"priceCurrency": "USD"
},
"featureList": [
"Multi-provider AI support",
"GLM 4.7 integration via Z.AI API",
"Autonomous coding with APEX mode",
"Real-time token streaming",
"Intelligent code fixes",
"Ollama local model support"
],
"softwareVersion": "1.0.0"
}
</script>
# NomadArch
<p align="center">
<img src="packages/ui/src/images/CodeNomad-Icon.png" alt="NomadArch Logo" width="180" height="180">
</p>
<h3 align="center">NomadArch - Advanced AI Coding Workspace</h3>
<h1 align="center">🏛️ NomadArch</h1>
<h3 align="center">Advanced AI Coding Workspace</h3>
<p align="center">
<strong>Fork of CodeNomad by OpenCode</strong>
<em>NomadArch is an enhanced fork of CodeNomad — now with GLM 4.7, multi-model support, and MULTIX Mode</em>
</p>
<p align="center">
<a href="https://github.com/roman-ryzenadvanced/NomadArch-v1.0/stargazers">
<img src="https://img.shields.io/github/stars/roman-ryzenadvanced/NomadArch-v1.0?style=social" alt="GitHub Stars">
<img src="https://img.shields.io/github/stars/roman-ryzenadvanced/NomadArch-v1.0?style=for-the-badge&logo=github&logoColor=white&color=gold" alt="GitHub Stars">
</a>
<a href="https://github.com/roman-ryzenadvanced/NomadArch-v1.0/network/members">
<img src="https://img.shields.io/github/forks/roman-ryzenadvanced/NomadArch-v1.0?style=social" alt="GitHub Forks">
<img src="https://img.shields.io/github/forks/roman-ryzenadvanced/NomadArch-v1.0?style=for-the-badge&logo=git&logoColor=white&color=blue" alt="GitHub Forks">
</a>
<a href="https://github.com/roman-ryzenadvanced/NomadArch-v1.0/blob/main/LICENSE">
<img src="https://img.shields.io/github/license/roman-ryzenadvanced/NomadArch-v1.0" alt="License">
</a>
<a href="https://github.com/roman-ryzenadvanced/NomadArch-v1.0/releases">
<img src="https://img.shields.io/github/v/release/roman-ryzenadvanced/NomadArch-v1.0" alt="Latest Release">
<img src="https://img.shields.io/github/license/roman-ryzenadvanced/NomadArch-v1.0?style=for-the-badge&color=green" alt="License">
</a>
</p>
<p align="center">
<a href="#features">Features</a> •
<a href="#supported-ai-models">AI Models</a> •
<a href="#installation">Installation</a> •
<a href="#usage">Usage</a> •
<a href="#whats-new">What's New</a> •
<a href="#credits">Credits</a>
<a href="#-features">Features</a> •
<a href="#-supported-ai-models">AI Models</a> •
<a href="#-installation">Installation</a> •
<a href="#-usage">Usage</a> •
<a href="#-whats-new">What's New</a> •
<a href="#-credits">Credits</a>
</p>
<p align="center">
<a href="https://github.com/roman-ryzenadvanced/NomadArch-v1.0">
<img src="https://img.shields.io/badge/Star%20this%20repo-%E2%AD%90-yellow?style=for-the-badge" alt="Star this repo">
<img src="https://img.shields.io/badge/⭐_Star_this_repo-yellow?style=for-the-badge" alt="Star this repo">
</a>
</p>
---
## Overview
## 🎯 Overview
NomadArch is an enhanced fork of CodeNomad by OpenCode, featuring significant UI/UX improvements, additional AI integrations, and a more robust architecture. This is a full-featured AI coding assistant with support for multiple AI providers including **GLM 4.7**, Anthropic, OpenAI, Google, Qwen, and local models via Ollama.
**NomadArch** is an enhanced fork of CodeNomad, featuring significant UI/UX improvements, additional AI integrations, and a more robust architecture. This is a full-featured AI coding assistant with support for multiple AI providers including **GLM 4.7**, Anthropic, OpenAI, Google, Qwen, and local models via Ollama.
### Key Improvements Over CodeNomad
- Fixed Qwen OAuth authentication flow
- Enhanced MULTIX Mode with live token streaming
- Improved UI/UX with detailed tooltips
- Auto-build verification on launch
- Comprehensive installer scripts for all platforms
- Port conflict detection and resolution hints
### Key Improvements Over CodeNomad
- 🔧 Fixed Qwen OAuth authentication flow
- 🚀 Enhanced MULTIX Mode with live token streaming
- 🎨 Improved UI/UX with detailed tooltips
- Auto-build verification on launch
- 📦 Comprehensive installer scripts for all platforms
- 🔌 Port conflict detection and resolution hints
- 🆓 **NEW: Binary-Free Mode** - No external binaries required!
### 🆓 Binary-Free Mode (v0.5.0)
NomadArch now works **without requiring the OpenCode binary**! This means:
| Benefit | Description |
|---------|-------------|
| ⚡ **Faster Setup** | No binary downloads, just npm install |
| 🌍 **Universal** | Works on all platforms without platform-specific binaries |
| 🆓 **Free Models** | Access free AI models without any binary |
| 🔄 **Seamless** | Automatically uses native mode when binary unavailable |
**Free Models Available (No API Key Required):**
- 🧠 **GPT-5 Nano** - 400K context, reasoning + tools
- ⚡ **Grok Code Fast 1** - 256K context, optimized for code
- 🌟 **GLM-4.7** - 205K context, top-tier performance
- 🚀 **Doubao Seed Code** - 256K context, specialized for coding
- 🥒 **Big Pickle** - 200K context, efficient and fast
---
## Supported AI Models & Providers
## 🤖 Supported AI Models
NomadArch supports a wide range of AI models from multiple providers, giving you flexibility to choose the best model for your coding tasks.
### 🚀 Featured Model: GLM 4.7 (Z.AI)
### 🔥 Featured Model: GLM 4.7 (Z.AI)
**GLM 4.7** is the latest state-of-the-art open model from Z.AI, now fully integrated into NomadArch. Released in December 2025, GLM 4.7 ranks **#1 for Web Development** and **#6 overall** on the LM Arena leaderboard.
#### Key Features
- 🔥 **128K Context Window** - Process entire codebases in a single session
- 🧠 **Interleaved Thinking** - Advanced reasoning with multi-step analysis
- 💭 **Preserved Thinking** - Maintains reasoning chain across long conversations
- 🔄 **Turn-level Thinking** - Optimized per-response reasoning for efficiency
| Feature | Description |
|---------|-------------|
| 📊 **128K Context Window** | Process entire codebases in a single session |
| 🧠 **Interleaved Thinking** | Advanced reasoning with multi-step analysis |
| 💭 **Preserved Thinking** | Maintains reasoning chain across long conversations |
| 🔄 **Turn-level Thinking** | Optimized per-response reasoning for efficiency |
#### Benchmark Performance
| Benchmark | Score | Improvement |
|-----------|-------|-------------|
| Benchmark | Score | Notes |
|-----------|-------|-------|
| SWE-bench | **+73.8%** | Over GLM-4.6 |
| SWE-bench Multilingual | **+66.7%** | Over GLM-4.6 |
| Terminal Bench 2.0 | **+41%** | Over GLM-4.6 |
| LM Arena WebDev | **#1** | Open Model Ranking |
| LM Arena Overall | **#6** | Open Model Ranking |
GLM 4.7 beats GPT-5, Claude Sonnet, and Gemini on multiple coding benchmarks.
#### Z.AI API Integration
- ✅ Fully integrated via Z.AI Plan API
- ✅ Compatible with Claude Code, Cline, Roo Code, Kilo Code
- ✅ Get **10% discount** with code: [`R0K78RJKNW`](https://z.ai/subscribe?ic=R0K78RJKNW)
- 🎯 [Subscribe to Z.AI with 10% off](https://z.ai/subscribe?ic=R0K78RJKNW)
> 🎯 **Get 10% discount on Z.AI with code: [`R0K78RJKNW`](https://z.ai/subscribe?ic=R0K78RJKNW)**
---
### 🤖 All Supported Models
### 📋 All Supported Models
<details>
<summary><b>🌟 Z.AI Models</b></summary>
#### Z.AI
| Model | Context | Specialty |
|-------|---------|-----------|
| **GLM 4.7** | 128K | Web Development, Coding |
| GLM 4.6 | 128K | General Coding |
| GLM-4 | 128K | Versatile |
#### Anthropic
</details>
<details>
<summary><b>🟣 Anthropic Models</b></summary>
| Model | Context | Specialty |
|-------|---------|-----------|
| Claude 3.7 Sonnet | 200K | Complex Reasoning |
| Claude 3.5 Sonnet | 200K | Balanced Performance |
| Claude 3 Opus | 200K | Maximum Quality |
#### OpenAI
</details>
<details>
<summary><b>🟢 OpenAI Models</b></summary>
| Model | Context | Specialty |
|-------|---------|-----------|
| GPT-5 Preview | 200K | Latest Capabilities |
| GPT-4.1 | 128K | Production Ready |
| GPT-4 Turbo | 128K | Fast & Efficient |
#### Google
</details>
<details>
<summary><b>🔵 Google Models</b></summary>
| Model | Context | Specialty |
|-------|---------|-----------|
| Gemini 2.0 Pro | 1M+ | Massive Context |
| Gemini 2.0 Flash | 1M+ | Ultra Fast |
#### Qwen
| Model | Context | Specialty |
|-------|---------|-----------|
</details>
<details>
<summary><b>🟠 Qwen & Local Models</b></summary>
| Model | Context/Size | Specialty |
|-------|--------------|-----------|
| Qwen 2.5 Coder | 32K | Code Specialized |
| Qwen 2.5 | 32K | General Purpose |
| DeepSeek Coder (Ollama) | Varies | Code |
| Llama 3.1 (Ollama) | Varies | General |
#### Local (Ollama)
| Model | Size | Specialty |
|-------|------|-----------|
| DeepSeek Coder | Varies | Code |
| Llama 3.1 | Varies | General |
| CodeLlama | Varies | Code |
| Mistral | Varies | General |
</details>
---
## Installation
## 📦 Installation
### Quick Start (Recommended)
The installers will automatically install **OpenCode CLI** (required for workspace functionality) using:
1. **Primary**: `npm install -g opencode-ai@latest` (fastest)
2. **Fallback**: Download from official GitHub releases if npm fails
#### Windows
```batch
# Double-click and run
Install-Windows.bat
# Then start app
Launch-Windows.bat
```
#### Linux
```bash
chmod +x Install-Linux.sh
./Install-Linux.sh
# Then start app
chmod +x Install-Linux.sh && ./Install-Linux.sh
./Launch-Unix.sh
```
#### macOS
```bash
chmod +x Install-Mac.sh
./Install-Mac.sh
# Then start app
chmod +x Install-Mac.sh && ./Install-Mac.sh
./Launch-Unix.sh
```
### Manual Installation
```bash
# Clone the repository
git clone https://github.com/roman-ryzenadvanced/NomadArch-v1.0.git
cd NomadArch
# Install dependencies
npm install
# Start the application
npm run dev:electron
```
### Building from Source
```bash
# Build all packages
npm run build
# Or build individual packages
npm run build:ui # Build UI
npm run build:server # Build server
npm run build:electron # Build Electron app
```
---
## Features
## 🚀 Features
### Core Features
- 🤖 **Multi-Provider AI Support** - GLM 4.7, Anthropic, OpenAI, Google, Qwen, Ollama (local)
- 🖥️ **Electron Desktop App** - Native feel with modern web technologies
- 📁 **Workspace Management** - Organize your projects efficiently
- 💬 **Real-time Streaming** - Live responses from AI models
- 🔧 **Smart Fix** - AI-powered code error detection and fixes
- 🏗️ **Build Integration** - One-click project builds
- 🔌 **Ollama Integration** - Run local AI models for privacy
| Feature | Description |
|---------|-------------|
| 🤖 **Multi-Provider AI** | GLM 4.7, Anthropic, OpenAI, Google, Qwen, Ollama |
| 🖥️ **Electron Desktop App** | Native feel with modern web technologies |
| 📁 **Workspace Management** | Organize your projects efficiently |
| 💬 **Real-time Streaming** | Live responses from AI models |
| 🔧 **Smart Fix** | AI-powered code error detection and fixes |
| 🔌 **Ollama Integration** | Run local AI models for privacy |
### UI/UX Highlights
-**MULTIX Mode** - Multi-task parallel AI conversations with live token counting
- 🛡️ **SHIELD Mode** - Auto-approval for hands-free operation
- 🚀 **APEX Mode** - Autonomous AI that chains tasks together
- 📊 **Live Token Counter** - Real-time token usage during streaming
- 💭 **Thinking Indicator** - Animated visual feedback when AI is processing
- 🎨 **Modern Dark Theme** - Beautiful, eye-friendly dark interface
- 🖱️ **Detailed Tooltips** - Hover over any button for explanations
| Mode | Description |
|------|-------------|
| ⚡ **MULTIX Mode** | Multi-task parallel AI conversations with live token counting |
| 🛡️ **SHIELD Mode** | Auto-approval for hands-free operation |
| 🚀 **APEX Mode** | Autonomous AI that chains tasks together |
---
## What's New in NomadArch
## 🆕 What's New
### Major Improvements Over Original CodeNomad
<details>
<summary><b>🎨 Branding & Identity</b></summary>
#### 🎨 Branding & Identity
-**New Branding**: "NomadArch" with proper attribution to OpenCode
-**Updated Loading Screen**: New branding with fork attribution
-**Updated Empty States**: All screens show NomadArch branding
#### 🔐 Qwen OAuth Integration
-**Fixed OAuth Flow**: Resolved "Body cannot be empty" error in Qwen authentication
-**Proper API Bodies**: POST requests now include proper JSON bodies
-**Fixed Device Poll Schema**: Corrected Fastify schema validation for OAuth polling
</details>
<details>
<summary><b>🔐 Qwen OAuth Integration</b></summary>
-**Fixed OAuth Flow**: Resolved "Body cannot be empty" error
-**Proper API Bodies**: POST requests now include proper JSON bodies
-**Fixed Device Poll Schema**: Corrected Fastify schema validation
</details>
<details>
<summary><b>🚀 MULTIX Mode Enhancements</b></summary>
#### 🚀 MULTIX Mode Enhancements
-**Live Streaming Token Counter**: Visible in header during AI processing
-**Thinking Roller Indicator**: Animated indicator with bouncing dots
-**Token Stats Display**: Shows input/output tokens processed
-**Auto-Scroll**: Intelligent scrolling during streaming
#### 🖥️ UI/UX Improvements
-**Detailed Button Tooltips**: Hover over any button for detailed explanations
- AUTHED: Authentication status explanation
- AI MODEL: Model selection help
- SMART FIX: AI code analysis feature
- BUILD: Project compilation
- APEX: Autonomous mode description
- SHIELD: Auto-approval mode
- MULTIX MODE: Multi-task interface
-**Bulletproof Layout**: Fixed layout issues with Editor/MultiX panels
-**Overflow Handling**: Long code lines don't break layout
-**Responsive Panels**: Editor and chat panels properly sized
</details>
#### 📂 File Editor Improvements
-**Proper File Loading**: Files load correctly when selected in explorer
-**Line Numbers**: Clean line number display
-**Word Wrap**: Long lines wrap instead of overflowing
<details>
<summary><b>🐛 Bug Fixes</b></summary>
#### 🔧 Developer Experience
-**Disabled Auto-Browser Open**: Dev server no longer opens browser automatically
-**Unified Installers**: One-click installers for Windows, Linux, and macOS
-**Enhanced Launchers**: Auto-fix capabilities, dependency checking, build verification
-**Port Conflict Detection**: Warns if default ports are in use
-**Error Recovery**: Provides actionable error messages with fixes
#### 🐛 Bug Fixes
- ✅ Fixed Qwen OAuth "empty body" errors
- ✅ Fixed MultiX panel being pushed off screen when Editor is open
- ✅ Fixed top menu/toolbar disappearing when file is selected
- ✅ Fixed layout breaking when scrolling in Editor or Chat
- ✅ Fixed auto-scroll interrupting manual scrolling
- ✅ Fixed sessions not showing on workspace first entry
- ✅ Fixed MultiX panel being pushed off screen
- ✅ Fixed top menu/toolbar disappearing
- ✅ Fixed layout breaking when scrolling
- ✅ Fixed sessions not showing on workspace entry
</details>
---
## Button Features Guide
## 🎮 Button Guide
| Button | Description |
|--------|-------------|
| **AUTHED** | Shows authentication status. Green = connected, Red = not authenticated |
| **AI MODEL** | Click to switch between AI models (GLM 4.7, Claude, GPT, etc.) |
| **SMART FIX** | AI analyzes your code for errors and automatically applies fixes |
| **BUILD** | Compiles and builds your project using detected build system |
| **APEX** | Autonomous mode - AI chains actions without waiting for approval |
| **SHIELD** | Auto-approval mode - AI makes changes without confirmation prompts |
| **MULTIX MODE** | Opens multi-task pipeline for parallel AI conversations |
| **AUTHED** | Shows authentication status (Green = connected) |
| **AI MODEL** | Click to switch between AI models |
| **SMART FIX** | AI analyzes code for errors and applies fixes |
| **BUILD** | Compiles and builds your project |
| **APEX** | Autonomous mode - AI chains actions automatically |
| **SHIELD** | Auto-approval mode - AI makes changes without prompts |
| **MULTIX MODE** | Opens multi-task pipeline for parallel conversations |
---
## Folder Structure
## 📁 Project Structure
```
NomadArch/
├── Install-Windows.bat # Windows installer with dependency checking
├── Install-Linux.sh # Linux installer with distro support
├── Install-Mac.sh # macOS installer with Apple Silicon support
├── Launch-Windows.bat # Windows launcher with auto-fix
├── Launch-Dev-Windows.bat # Windows developer mode launcher
├── Launch-Unix.sh # Linux/macOS launcher
├── Install-*.bat/.sh # Platform installers
├── Launch-*.bat/.sh # Platform launchers
├── packages/
│ ├── electron-app/ # Electron main process
│ ├── server/ # Backend server (Fastify)
│ ├── server/ # Backend (Fastify)
│ ├── ui/ # Frontend (SolidJS + Vite)
│ ├── tauri-app/ # Tauri alternative desktop app
│ └── opencode-config/ # OpenCode configuration
├── README.md # This file
└── package.json # Root package manifest
└── README.md
```
---
## Requirements
## 🔧 Requirements
- **Node.js**: v18 or higher
- **npm**: v9 or higher
- **Git**: For version control features
- **OS**: Windows 10+, macOS 11+ (Big Sur), or Linux (Ubuntu 20.04+, Fedora, Arch, OpenSUSE)
### Platform-Specific Requirements
**Windows**:
- Administrator privileges recommended for installation
- 2GB free disk space
**Linux**:
- Build tools (gcc, g++, make)
- Package manager (apt, dnf, pacman, or zypper)
**macOS**:
- Xcode Command Line Tools
- Homebrew (recommended)
- Rosetta 2 for Apple Silicon (for x86_64 compatibility)
| Requirement | Version |
|-------------|---------|
| Node.js | v18+ |
| npm | v9+ |
| OS | Windows 10+, macOS 11+, Linux |
---
## Troubleshooting
## 🆘 Troubleshooting
### "Dependencies not installed" Error
Run the installer script first:
- Windows: `Install-Windows.bat`
- Linux: `./Install-Linux.sh`
- macOS: `./Install-Mac.sh`
<details>
<summary><b>Common Issues & Solutions</b></summary>
### "opencode not found" or Workspace Creation Fails
The installer should automatically install OpenCode CLI. If it fails:
**Option 1 - Manual npm install:**
**Dependencies not installed?**
```bash
npm install -g opencode-ai@latest
# Run the installer for your platform
Install-Windows.bat # Windows
./Install-Linux.sh # Linux
./Install-Mac.sh # macOS
```
**Option 2 - Manual download:**
1. Visit: https://github.com/sst/opencode/releases/latest
2. Download the appropriate ZIP for your platform:
- Windows: `opencode-windows-x64.zip`
- Linux x64: `opencode-linux-x64.zip`
- Linux ARM64: `opencode-linux-arm64.zip`
- macOS Intel: `opencode-darwin-x64.zip`
- macOS Apple Silicon: `opencode-darwin-arm64.zip`
3. Extract and place `opencode` or `opencode.exe` in the `bin/` folder
**Port conflict?**
```bash
# Kill process on port 3000/3001
taskkill /F /PID <PID> # Windows
kill -9 <PID> # Unix
```
### Port 3000 or 3001 Already in Use
The launchers will detect port conflicts and warn you. To fix:
1. Close other applications using these ports
2. Check for running NomadArch instances
3. Kill the process: `taskkill /F /PID <PID>` (Windows) or `kill -9 <PID>` (Unix)
**OAuth fails?**
1. Check internet connection
2. Complete OAuth in browser
3. Clear browser cookies and retry
### Layout Issues
If the UI looks broken, try:
1. Refresh the app (Ctrl+R or Cmd+R)
2. Restart the application
3. Clear node_modules and reinstall: `rm -rf node_modules && npm install`
### OAuth Authentication Fails
1. Check your internet connection
2. Ensure you completed the OAuth flow in your browser
3. Try logging out and back in
4. Clear browser cookies for the OAuth provider
### Build Errors
1. Ensure you have the latest Node.js (18+)
2. Clear npm cache: `npm cache clean --force`
3. Delete node_modules: `rm -rf node_modules` (or `rmdir /s /q node_modules` on Windows)
4. Reinstall: `npm install`
### Sessions Not Showing on Workspace Entry
This has been fixed with SSE connection waiting. The app now waits for the Server-Sent Events connection to be established before fetching sessions.
</details>
---
## Credits
## 🙏 Credits
### Core Framework & Build Tools
Built with amazing open source projects:
| Project | Version | Description | License |
|----------|----------|-------------|----------|
| [SolidJS](https://www.solidjs.com/) | ^1.8.0 | Reactive JavaScript UI framework | MIT |
| [Vite](https://vitejs.dev/) | ^5.0.0 | Next-generation frontend build tool | MIT |
| [TypeScript](https://www.typescriptlang.org/) | ^5.3.0 - 5.6.3 | JavaScript with type system | Apache-2.0 |
| [Electron](https://www.electronjs.org/) | Via electron-app | Cross-platform desktop app framework | MIT |
| [Tauri](https://tauri.app/) | Via tauri-app | Alternative desktop app framework | Apache-2.0/MIT |
### UI Components & Styling
| Project | Version | Description | License |
|----------|----------|-------------|----------|
| [@suid/material](https://suid.io/) | ^0.19.0 | Material Design components for SolidJS | MIT |
| [@suid/icons-material](https://suid.io/) | ^0.9.0 | Material Design icons for SolidJS | MIT |
| [@suid/system](https://suid.io/) | ^0.14.0 | System components for SolidJS | MIT |
| [@kobalte/core](https://kobalte.dev/) | 0.13.11 | Accessible, unstyled UI components | MIT |
| [TailwindCSS](https://tailwindcss.com/) | ^3.0.0 | Utility-first CSS framework | MIT |
| [PostCSS](https://postcss.org/) | ^8.5.6 | CSS transformation tool | MIT |
| [Autoprefixer](https://github.com/postcss/autoprefixer) | ^10.4.21 | Parse CSS and add vendor prefixes | MIT |
### Routing & State Management
| Project | Version | Description | License |
|----------|----------|-------------|----------|
| [@solidjs/router](https://github.com/solidjs/solid-router) | ^0.13.0 | Router for SolidJS | MIT |
### Markdown & Code Display
| Project | Version | Description | License |
|----------|----------|-------------|----------|
| [Marked](https://marked.js.org/) | ^12.0.0 | Markdown parser and compiler | MIT |
| [GitHub Markdown CSS](https://github.com/sindresorhus/github-markdown-css) | ^5.8.1 | Markdown styling from GitHub | MIT |
| [Shiki](https://shiki.style/) | ^3.13.0 | Syntax highlighting | MIT |
| [@git-diff-view/solid](https://github.com/git-diff-view/git-diff-view) | ^0.0.8 | Git diff visualization for SolidJS | MIT |
### Icons & Visuals
| Project | Version | Description | License |
|----------|----------|-------------|----------|
| [Lucide Solid](https://lucide.dev/) | ^0.300.0 | Beautiful & consistent icon toolkit | ISC |
| [QRCode](https://github.com/soldair/node-qrcode) | ^1.5.3 | QR code generation | MIT |
### Backend & Server
| Project | Version | Description | License |
|----------|----------|-------------|----------|
| [Fastify](https://www.fastify.io/) | ^4.28.1 | Fast and low overhead web framework | MIT |
| [@fastify/cors](https://github.com/fastify/fastify-cors) | ^8.5.0 | CORS support for Fastify | MIT |
| [@fastify/reply-from](https://github.com/fastify/fastify-reply-from) | ^9.8.0 | Proxy support for Fastify | MIT |
| [@fastify/static](https://github.com/fastify/fastify-static) | ^7.0.4 | Static file serving for Fastify | MIT |
| [Ollama](https://ollama.com/) | ^0.5.0 | Local AI model integration | MIT |
### AI & SDK
| Project | Version | Description | License |
|----------|----------|-------------|----------|
| [OpenCode CLI](https://github.com/sst/opencode) | v1.0.191 | Open source AI coding agent - Required for workspace functionality | MIT |
| [@opencode-ai/sdk](https://github.com/opencode/ai-sdk) | ^1.0.138 | OpenCode AI SDK | Custom |
| [google-auth-library](https://github.com/googleapis/google-auth-library-nodejs) | ^10.5.0 | Google OAuth authentication | Apache-2.0 |
### HTTP & Networking
| Project | Version | Description | License |
|----------|----------|-------------|----------|
| [Axios](https://axios-http.com/) | ^1.6.0 | Promise-based HTTP client | MIT |
| [undici](https://undici.nodejs.org/) | ^6.19.8 | HTTP/1.1 client for Node.js | MIT |
| [node-fetch](https://github.com/node-fetch/node-fetch) | ^3.3.2 | A light-weight module that brings window.fetch to Node.js | MIT |
### Utilities & Helpers
| Project | Version | Description | License |
|----------|----------|-------------|----------|
| [Nanoid](https://github.com/ai/nanoid) | ^5.0.4 | Unique string ID generator | MIT |
| [Debug](https://github.com/debug-js/debug) | ^4.4.3 | Debug logging utility | MIT |
| [Pino](https://getpino.io/) | ^9.4.0 | Extremely fast Node.js logger | MIT |
| [FuzzySort](https://github.com/farzher/fuzzysort) | ^2.0.4 | Fuzzy search and sort | MIT |
| [Zod](https://zod.dev/) | ^3.23.8 | TypeScript-first schema validation | MIT |
| [Commander](https://github.com/tj/commander.js) | ^12.1.0 | Node.js command-line interface | MIT |
| [7zip-bin](https://github.com/felixrieseberg/7zip-bin) | ^5.2.0 | 7-Zip binary wrapper | MIT |
### Notifications & Feedback
| Project | Version | Description | License |
|----------|----------|-------------|----------|
| [solid-toast](https://github.com/ThisIsFlorian/solid-toast) | ^0.5.0 | Toast notifications for SolidJS | MIT |
### Desktop Integration
| Project | Version | Description | License |
|----------|----------|-------------|----------|
| [@tauri-apps/api](https://tauri.app/) | ^2.9.1 | Tauri API for desktop integration | Apache-2.0/MIT |
| [@tauri-apps/plugin-opener](https://tauri.app/) | ^2.5.2 | Tauri plugin for opening URLs/paths | Apache-2.0/MIT |
### Development Tools
| Project | Version | Description | License |
|----------|----------|-------------|----------|
| [Vite Plugin Solid](https://github.com/solidjs/vite-plugin-solid) | ^2.10.0 | Vite plugin for SolidJS | MIT |
| [ts-node](https://github.com/TypeStrong/ts-node) | ^10.9.2 | TypeScript execution and REPL | MIT |
| [tsx](https://github.com/privatenumber/tsx) | ^4.20.6 | TypeScript execution | MIT |
| [cross-env](https://github.com/kentcdodds/cross-env) | ^7.0.3 | Set environment variables across platforms | MIT |
| Category | Projects |
|----------|----------|
| **Framework** | SolidJS, Vite, TypeScript, Electron |
| **UI** | TailwindCSS, Kobalte, SUID Material |
| **Backend** | Fastify, Ollama |
| **AI** | OpenCode CLI, Various AI SDKs |
---
## Project Fork
## 📄 License
| Project | Repository | Description |
|----------|-------------|-------------|
| [CodeNomad](https://github.com/opencode/codenom) | OpenCode | Original AI coding workspace |
| [NomadArch](https://github.com/roman-ryzenadvanced/NomadArch-v1.0) | NeuralNomadsAI | Enhanced fork with GLM 4.7 and MULTIX Mode |
---
## License
This project is a fork of CodeNomad by OpenCode. Please refer to the original project for licensing information.
All third-party libraries listed above retain their respective licenses.
This project is a fork of [CodeNomad](https://github.com/opencode/codenom).
---
@@ -568,5 +350,5 @@ All third-party libraries listed above retain their respective licenses.
</p>
<p align="center">
Forked from <a href="https://github.com/opencode/codenom">CodeNomad by OpenCode</a>
<sub>NomadArch is an enhanced fork of CodeNomad</sub>
</p>

76
manual_test_guide.md Normal file
View File

@@ -0,0 +1,76 @@
# MultiX v2 - Verification & User Guide
**Date:** 2025-12-25
**Version:** 2.0.0 (Gold Master)
---
## 1. Feature Verification Guide
### A. Core Multi-Tasking & Parallel Execution
* **Goal:** Verify you can run multiple agents at once without freezing.
* **Steps:**
1. Create **Task 1**. Type "Write a long story about space" and hit Launch.
2. *While Task 1 is streaming*, click the **+** button to create **Task 2**.
3. Type "Write a python script for fibonacci" in Task 2 and hit Launch.
4. **Result:** Both tasks should stream simultaneously. Switching tabs should be instant.
### B. Per-Task Isolation (Agents & Models)
* **Goal:** Verify each task retains its own settings.
* **Steps:**
1. Go to **Task 1**. Select Agent: **"Software Engineer"** and Model: **"minimax-m2"**.
2. Go to **Task 2**. Select Agent: **"Writer"** and Model: **"deepseek-chat"**.
3. Switch back and forth.
4. **Result:** The selectors should update to reflect the saved state for each task.
### C. AI Agent Generator (NEW)
* **Goal:** Create a custom agent using AI.
* **Steps:**
1. Open the **Agent Selector** dropdown.
2. Click **"✨ AI Agent Generator"**.
3. Type: *"A rust expert who is sarcastic and funny"*.
4. Click **"Generate Agent"**.
5. Review the generated name, description, and system prompt.
6. Click **"Save & Use Agent"**.
7. **Result:** The new agent is saved and immediately selected.
### D. Prompt Enhancer
* **Goal:** Verify the Prompt Enhancer expands a terse prompt into a structured, optimized request.
* **Steps:**
1. Type a simple prompt: *"fix bug"*.
2. Click the **Magic Wand (✨)** button in the input area.
3. **Result:** The prompt is expanded into a professional, structured request using the active model.
### E. Compaction System
* **Goal:** Manage context window usage.
* **Steps:**
1. In a long chat, look for the **"Compact suggested"** banner at the top of the chat list.
2. Click **"Compact"** in the banner or the header bar.
3. **Result:** The session history is summarized, freeing up tokens while keeping context.
---
## 2. Menu & Wiring Check
| Button | Wired Action | Status |
|--------|--------------|--------|
| **MULTIX Badge** | Visual Indicator | ✅ Active |
| **SKILLS** | Opens Sidebar (Events) | ✅ Wired |
| **Active Task** | Shows current task name | ✅ Wired |
| **Pipeline Tab** | Switches to Dashboard | ✅ Wired |
| **Task Tabs** | Switch/Close Tasks | ✅ Wired |
| **Compact Btn** | Triggers Compaction | ✅ Wired |
| **API Key Btn** | Opens Settings Modal | ✅ Wired |
| **Agent Select** | Updates Task Session | ✅ Wired |
| **Model Select** | Updates Task Session | ✅ Wired |
---
## 3. Technical Status
* **Build:** Passing (No TypeScript errors).
* **Dev Server:** Running on port 3001.
* **Architecture:** Polling-based (150ms sync) to prevent UI thread blocking.
* **State:** Local signals + Non-reactive store references.
**Ready for deployment.**

14
package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "codenomad-workspace",
"version": "0.4.0",
"version": "0.5.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "codenomad-workspace",
"version": "0.4.0",
"version": "0.5.0",
"dependencies": {
"7zip-bin": "^5.2.0",
"google-auth-library": "^10.5.0"
@@ -9608,6 +9608,15 @@
"node": ">=14.17"
}
},
"node_modules/ulid": {
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/ulid/-/ulid-3.0.2.tgz",
"integrity": "sha512-yu26mwteFYzBAot7KVMqFGCVpsF6g8wXfJzQUHvu1no3+rRRSFcSV2nKeYvNPLD2J4b08jYBDhHUjeH0ygIl9w==",
"license": "MIT",
"bin": {
"ulid": "dist/cli.js"
}
},
"node_modules/undici": {
"version": "6.22.0",
"resolved": "https://registry.npmjs.org/undici/-/undici-6.22.0.tgz",
@@ -10604,6 +10613,7 @@
"fastify": "^4.28.1",
"fuzzysort": "^2.0.4",
"pino": "^9.4.0",
"ulid": "^3.0.2",
"undici": "^6.19.8",
"zod": "^3.23.8"
},

View File

@@ -1,6 +1,6 @@
{
"name": "codenomad-workspace",
"version": "0.4.0",
"version": "0.5.0",
"private": true,
"description": "CodeNomad monorepo workspace",
"workspaces": {

View File

@@ -68,10 +68,23 @@ export function setupCliIPC(mainWindow: BrowserWindow, cliManager: CliProcessMan
})
ipcMain.handle("users:createGuest", async () => {
const user = createGuestUser()
// Set up isolated environment for guest user
const root = getUserDataRoot(user.id)
cliManager.setUserEnv({
CODENOMAD_USER_DIR: root,
CLI_CONFIG: path.join(root, "config.json"),
})
await cliManager.stop()
const devMode = process.env.NODE_ENV === "development"
await cliManager.start({ dev: devMode })
// Set as active user
setActiveUser(user.id)
return user
})
ipcMain.handle("users:login", async (_, payload: { id: string; password?: string }) => {
console.log("[IPC:users:login] Attempting login for:", payload.id, "password length:", payload.password?.length)
const ok = verifyPassword(payload.id, payload.password ?? "")
console.log("[IPC:users:login] verifyPassword result:", ok)
if (!ok) {
return { success: false }
}

View File

@@ -5,7 +5,7 @@ import { fileURLToPath } from "url"
import { createApplicationMenu } from "./menu"
import { setupCliIPC } from "./ipc"
import { CliProcessManager } from "./process-manager"
import { ensureDefaultUsers, getActiveUser, getUserDataRoot, clearGuestUsers } from "./user-store"
import { ensureDefaultUsers, getActiveUser, getUserDataRoot, clearGuestUsers, logoutActiveUser } from "./user-store"
const mainFilename = fileURLToPath(import.meta.url)
const mainDirname = dirname(mainFilename)
@@ -28,6 +28,16 @@ if (isMac) {
app.commandLine.appendSwitch("disable-spell-checking")
}
// Windows: Use Edge WebView2 rendering for better performance
if (process.platform === "win32") {
app.commandLine.appendSwitch("enable-features", "WebViewTagWebComponent,WebView2")
app.commandLine.appendSwitch("disable-gpu-sandbox")
app.commandLine.appendSwitch("enable-gpu-rasterization")
app.commandLine.appendSwitch("enable-zero-copy")
app.commandLine.appendSwitch("disable-background-timer-throttling")
app.commandLine.appendSwitch("disable-renderer-backgrounding")
}
function getIconPath() {
if (app.isPackaged) {
return join(process.resourcesPath, "icon.png")
@@ -471,6 +481,8 @@ if (isMac) {
}
app.whenReady().then(() => {
clearGuestUsers()
logoutActiveUser()
ensureDefaultUsers()
applyUserEnvToCli()
startCli()

View File

@@ -111,19 +111,50 @@ function migrateLegacyData(targetDir: string) {
export function ensureDefaultUsers(): UserRecord {
const store = readStore()
if (store.users.length > 0) {
const active = store.users.find((u) => u.id === store.activeUserId) ?? store.users[0]
if (!store.activeUserId) {
store.activeUserId = active.id
// If roman exists, ensure his password is updated to the new required one if it matches the old default
const roman = store.users.find(u => u.name === "roman")
if (roman && roman.salt && roman.passwordHash) {
const oldDefaultHash = hashPassword("q1w2e3r4", roman.salt)
if (roman.passwordHash === oldDefaultHash) {
console.log("[UserStore] Updating roman's password to new default")
const newSalt = generateSalt()
roman.salt = newSalt
roman.passwordHash = hashPassword("!@#$q1w2e3r4", newSalt)
roman.updatedAt = nowIso()
writeStore(store)
}
// NEW: Check if roman needs data migration (e.g. if he was created before migration logic was robust)
const userDir = getUserDir(roman.id)
const configPath = path.join(userDir, "config.json")
let needsMigration = !existsSync(configPath)
if (!needsMigration) {
try {
const config = JSON.parse(readFileSync(configPath, "utf-8"))
if (!config.recentFolders || config.recentFolders.length === 0) {
needsMigration = true
}
} catch (e) {
needsMigration = true
}
}
if (needsMigration) {
console.log(`[UserStore] Roman exists but seems to have missing data. Triggering migration to ${userDir}...`)
migrateLegacyData(userDir)
}
}
if (store.users.length > 0) {
const active = store.users.find((u) => u.id === store.activeUserId) ?? store.users[0]
return active
}
const existingIds = new Set<string>()
const userId = ensureUniqueId("roman", existingIds)
const salt = generateSalt()
const passwordHash = hashPassword("q1w2e3r4", salt)
const passwordHash = hashPassword("!@#$q1w2e3r4", salt)
const record: UserRecord = {
id: userId,
name: "roman",
@@ -134,7 +165,6 @@ export function ensureDefaultUsers(): UserRecord {
}
store.users.push(record)
store.activeUserId = record.id
writeStore(store)
const userDir = getUserDir(record.id)
@@ -153,6 +183,13 @@ export function getActiveUser(): UserRecord | null {
return store.users.find((user) => user.id === store.activeUserId) ?? null
}
/**
 * Clears the active-user marker in the user store and persists the change.
 * User records themselves are untouched; only `activeUserId` is reset, so the
 * next read of the store reports no user signed in.
 */
export function logoutActiveUser() {
  const store = readStore()
  store.activeUserId = undefined
  writeStore(store)
  console.log("[UserStore] Active user logged out")
}
export function setActiveUser(userId: string) {
const store = readStore()
const user = store.users.find((u) => u.id === userId)
@@ -239,10 +276,20 @@ export function deleteUser(userId: string) {
/**
 * Verifies a user's password against the stored salted hash.
 *
 * @param userId   ID of the user record to check.
 * @param password Plaintext password supplied by the caller.
 * @returns true when the user exists and the password matches (guests always
 *          pass); false for unknown users, users without credentials, or a
 *          mismatched password.
 */
export function verifyPassword(userId: string, password: string): boolean {
  const store = readStore()
  const user = store.users.find((u) => u.id === userId)
  if (!user) {
    console.log("[verifyPassword] User not found:", userId)
    return false
  }
  // Guest accounts are passwordless by design.
  if (user.isGuest) return true
  if (!user.salt || !user.passwordHash) {
    console.log("[verifyPassword] No salt or hash for user:", userId)
    return false
  }
  // SECURITY: never log the plaintext password, the computed hash, or the
  // stored hash — the previous debug logging leaked credentials to the console.
  return hashPassword(password, user.salt) === user.passwordHash
}
export function getUserDataRoot(userId: string) {

View File

@@ -1,3 +1,204 @@
{
"$schema": "https://opencode.ai/config.json"
"$schema": "https://opencode.ai/config.json",
// Antigravity plugin for Google OAuth (Gemini + Claude models via Antigravity)
"plugin": [
"opencode-antigravity-auth@1.2.6"
],
"provider": {
// Antigravity models (via Google OAuth)
"google": {
"models": {
// Gemini Models
"gemini-3-pro-low": {
"name": "Gemini 3 Pro Low (Antigravity)",
"limit": {
"context": 1048576,
"output": 65535
},
"modalities": {
"input": [
"text",
"image",
"pdf"
],
"output": [
"text"
]
}
},
"gemini-3-pro-high": {
"name": "Gemini 3 Pro High (Antigravity)",
"limit": {
"context": 1048576,
"output": 65535
},
"modalities": {
"input": [
"text",
"image",
"pdf"
],
"output": [
"text"
]
}
},
"gemini-3-flash": {
"name": "Gemini 3 Flash (Antigravity)",
"limit": {
"context": 1048576,
"output": 65536
},
"modalities": {
"input": [
"text",
"image",
"pdf"
],
"output": [
"text"
]
}
},
// Claude Models (via Antigravity)
"claude-sonnet-4-5": {
"name": "Claude Sonnet 4.5 (Antigravity)",
"limit": {
"context": 200000,
"output": 64000
},
"modalities": {
"input": [
"text",
"image",
"pdf"
],
"output": [
"text"
]
}
},
"claude-sonnet-4-5-thinking-low": {
"name": "Claude Sonnet 4.5 Thinking Low (Antigravity)",
"limit": {
"context": 200000,
"output": 64000
},
"modalities": {
"input": [
"text",
"image",
"pdf"
],
"output": [
"text"
]
}
},
"claude-sonnet-4-5-thinking-medium": {
"name": "Claude Sonnet 4.5 Thinking Medium (Antigravity)",
"limit": {
"context": 200000,
"output": 64000
},
"modalities": {
"input": [
"text",
"image",
"pdf"
],
"output": [
"text"
]
}
},
"claude-sonnet-4-5-thinking-high": {
"name": "Claude Sonnet 4.5 Thinking High (Antigravity)",
"limit": {
"context": 200000,
"output": 64000
},
"modalities": {
"input": [
"text",
"image",
"pdf"
],
"output": [
"text"
]
}
},
"claude-opus-4-5-thinking-low": {
"name": "Claude Opus 4.5 Thinking Low (Antigravity)",
"limit": {
"context": 200000,
"output": 64000
},
"modalities": {
"input": [
"text",
"image",
"pdf"
],
"output": [
"text"
]
}
},
"claude-opus-4-5-thinking-medium": {
"name": "Claude Opus 4.5 Thinking Medium (Antigravity)",
"limit": {
"context": 200000,
"output": 64000
},
"modalities": {
"input": [
"text",
"image",
"pdf"
],
"output": [
"text"
]
}
},
"claude-opus-4-5-thinking-high": {
"name": "Claude Opus 4.5 Thinking High (Antigravity)",
"limit": {
"context": 200000,
"output": 64000
},
"modalities": {
"input": [
"text",
"image",
"pdf"
],
"output": [
"text"
]
}
},
// Other Models
"gpt-oss-120b-medium": {
"name": "GPT-OSS 120B Medium (Antigravity)",
"limit": {
"context": 131072,
"output": 32768
},
"modalities": {
"input": [
"text",
"image",
"pdf"
],
"output": [
"text"
]
}
}
}
}
}
}

View File

@@ -1,18 +1,8 @@
import { tool } from "@opencode-ai/plugin/tool"
// NomadArch Plugin Template
// This file is a placeholder. OpenCode plugins are optional.
// To create a plugin, see: https://opencode.ai/docs/plugins
export async function HelloPlugin() {
return {
tool: {
hello: tool({
description: "Return a friendly greeting",
args: {
name: tool.schema.string().optional().describe("Name to greet"),
},
async execute(args) {
const target = args.name?.trim() || "CodeNomad"
return `Hello, ${target}!`
},
}),
},
}
export async function init() {
// No-op placeholder - customize as needed
return {}
}

View File

@@ -31,6 +31,7 @@
"fastify": "^4.28.1",
"fuzzysort": "^2.0.4",
"pino": "^9.4.0",
"ulid": "^3.0.2",
"undici": "^6.19.8",
"zod": "^3.23.8"
},

View File

@@ -16,6 +16,8 @@ export interface Task {
status: TaskStatus
timestamp: number
messageIds?: string[] // IDs of messages associated with this task
taskSessionId?: string
archived?: boolean
}
export interface SessionTasks {
@@ -190,6 +192,16 @@ export interface InstanceData {
agentModelSelections: AgentModelSelection
sessionTasks?: SessionTasks // Multi-task chat support: tasks per session
sessionSkills?: Record<string, SkillSelection[]> // Selected skills per session
sessionMessages?: Record<
string,
Array<{
id: string
role: "user" | "assistant" | "system" | "tool"
content?: string
createdAt?: number
updatedAt?: number
}>
>
customAgents?: Array<{
name: string
description?: string

View File

@@ -0,0 +1,189 @@
/**
 * Context Engine HTTP Client
 * Communicates with the Context-Engine RAG service for code retrieval and memory management.
 * The client methods below are fail-soft: transport errors are logged and mapped
 * to neutral fallback values rather than thrown to callers.
 */
import { Logger } from "../logger"

export interface ContextEngineConfig {
  /** Base URL of the Context-Engine API (default: http://localhost:8000) */
  baseUrl: string
  /** Request timeout in milliseconds (default: 30000) */
  timeout: number
}

/** Body of POST /index. */
export interface IndexRequest {
  path: string
  // When true, index the directory tree under `path`, not just the top level.
  recursive?: boolean
}

/** Response of POST /index. */
export interface IndexResponse {
  status: "started" | "completed" | "error"
  indexed_files?: number
  message?: string
}

/** Body of POST /query. */
export interface QueryRequest {
  query: string
  context_window?: number
  top_k?: number
}

/** Response of POST /query: ranked snippets with provenance. */
export interface QueryResponse {
  results: Array<{
    content: string
    file_path: string
    // Relevance score; rendered as a percentage by consumers.
    score: number
    metadata?: Record<string, unknown>
  }>
  total_results: number
}

/** Body of POST /memory. */
export interface MemoryRequest {
  text: string
  metadata?: Record<string, unknown>
}

/** Response of POST /memory. */
export interface MemoryResponse {
  id: string
  status: "added" | "error"
}

/** Response of GET /health. */
export interface HealthResponse {
  status: "healthy" | "unhealthy"
  version?: string
  indexed_files?: number
}

// Defaults merged under any caller-supplied partial config.
const DEFAULT_CONFIG: ContextEngineConfig = {
  baseUrl: "http://localhost:8000",
  timeout: 30000,
}
/**
 * Thin HTTP client for the Context-Engine REST API.
 * Every public method catches its own errors and returns a safe fallback, so
 * callers never need try/catch around engine access.
 */
export class ContextEngineClient {
  private config: ContextEngineConfig
  private logger: Logger

  constructor(config: Partial<ContextEngineConfig> = {}, logger: Logger) {
    this.config = { ...DEFAULT_CONFIG, ...config }
    this.logger = logger
  }

  /**
   * Check if the Context-Engine is healthy and responding.
   * Never throws: any transport error is mapped to { status: "unhealthy" }.
   */
  async health(): Promise<HealthResponse> {
    try {
      const response = await this.request<HealthResponse>("/health", {
        method: "GET",
      })
      return response
    } catch (error) {
      this.logger.debug({ error }, "Context-Engine health check failed")
      return { status: "unhealthy" }
    }
  }

  /**
   * Trigger indexing for a project path.
   * Failures are returned as { status: "error" } instead of being thrown.
   *
   * @param path      Filesystem path to index.
   * @param recursive Index the whole tree under `path` (default true).
   */
  async index(path: string, recursive = true): Promise<IndexResponse> {
    this.logger.info({ path, recursive }, "Triggering Context-Engine indexing")
    try {
      const response = await this.request<IndexResponse>("/index", {
        method: "POST",
        body: JSON.stringify({ path, recursive } as IndexRequest),
      })
      this.logger.info({ path, response }, "Context-Engine indexing response")
      return response
    } catch (error) {
      this.logger.error({ path, error }, "Context-Engine indexing failed")
      return {
        status: "error",
        message: error instanceof Error ? error.message : "Unknown error",
      }
    }
  }

  /**
   * Query the Context-Engine for relevant code snippets.
   * Returns an empty result set on failure (fail-soft).
   *
   * @param prompt        Natural-language query; only the first 100 chars are logged.
   * @param contextWindow Token budget hint forwarded as `context_window`.
   * @param topK          Maximum number of snippets to return.
   */
  async query(prompt: string, contextWindow = 4096, topK = 5): Promise<QueryResponse> {
    this.logger.debug({ prompt: prompt.slice(0, 100), contextWindow, topK }, "Querying Context-Engine")
    try {
      const response = await this.request<QueryResponse>("/query", {
        method: "POST",
        body: JSON.stringify({
          query: prompt,
          context_window: contextWindow,
          top_k: topK,
        } as QueryRequest),
      })
      this.logger.debug({ resultCount: response.results.length }, "Context-Engine query completed")
      return response
    } catch (error) {
      this.logger.warn({ error }, "Context-Engine query failed")
      return { results: [], total_results: 0 }
    }
  }

  /**
   * Add a memory/rule to the Context-Engine for session-specific context.
   * Returns { id: "", status: "error" } on failure.
   */
  async addMemory(text: string, metadata?: Record<string, unknown>): Promise<MemoryResponse> {
    this.logger.debug({ textLength: text.length }, "Adding memory to Context-Engine")
    try {
      const response = await this.request<MemoryResponse>("/memory", {
        method: "POST",
        body: JSON.stringify({ text, metadata } as MemoryRequest),
      })
      return response
    } catch (error) {
      this.logger.warn({ error }, "Context-Engine addMemory failed")
      return { id: "", status: "error" }
    }
  }

  /**
   * Get the current indexing status.
   * Best-effort: any failure is reported as "not indexing".
   */
  async getStatus(): Promise<{ indexing: boolean; indexed_files: number; last_indexed?: string }> {
    try {
      const response = await this.request<{ indexing: boolean; indexed_files: number; last_indexed?: string }>("/status", {
        method: "GET",
      })
      return response
    } catch {
      // Optional catch binding: the error value was previously bound but unused.
      return { indexing: false, indexed_files: 0 }
    }
  }

  /**
   * Perform a JSON HTTP request against the engine with a timeout.
   * Throws on non-2xx responses and on timeout (via AbortController).
   */
  private async request<T>(endpoint: string, options: RequestInit): Promise<T> {
    const url = `${this.config.baseUrl}${endpoint}`
    const controller = new AbortController()
    // Abort the fetch when the configured timeout elapses; always cleared in finally.
    const timeoutId = setTimeout(() => controller.abort(), this.config.timeout)
    try {
      const response = await fetch(url, {
        ...options,
        headers: {
          "Content-Type": "application/json",
          ...options.headers,
        },
        signal: controller.signal,
      })
      if (!response.ok) {
        const errorText = await response.text().catch(() => "")
        throw new Error(`Context-Engine request failed: ${response.status} ${response.statusText} - ${errorText}`)
      }
      return await response.json() as T
    } finally {
      clearTimeout(timeoutId)
    }
  }
}

View File

@@ -0,0 +1,13 @@
/**
 * Context Engine module exports
 * Re-exports the HTTP client and the process-lifecycle service (plus its
 * singleton helpers) so consumers import from "./context-engine" directly.
 */
export { ContextEngineClient, type ContextEngineConfig, type QueryResponse, type IndexResponse } from "./client"
export {
  ContextEngineService,
  type ContextEngineServiceConfig,
  type ContextEngineStatus,
  getContextEngineService,
  initializeContextEngineService,
  shutdownContextEngineService,
} from "./service"

View File

@@ -0,0 +1,350 @@
/**
* Context Engine Service
* Manages the lifecycle of the Context-Engine process (Python sidecar)
* and provides access to the Context-Engine client.
*/
import { spawn, ChildProcess } from "child_process"
import { EventEmitter } from "events"
import { Logger } from "../logger"
import { ContextEngineClient, ContextEngineConfig, HealthResponse } from "./client"
// Lifecycle states of the engine sidecar; "indexing" still accepts queries (see isReady).
export type ContextEngineStatus = "stopped" | "starting" | "ready" | "indexing" | "error"

export interface ContextEngineServiceConfig {
  /** Path to the context-engine executable or Python script */
  binaryPath?: string
  /** Arguments to pass to the context-engine process */
  args?: string[]
  /** Port for the Context-Engine API (default: 8000) */
  port: number
  /** Host for the Context-Engine API (default: localhost) */
  host: string
  /** Whether to auto-start the engine when first needed (lazy start) */
  lazyStart: boolean
  /** Health check interval in milliseconds */
  healthCheckInterval: number
  /** Max retries for health check before marking as error */
  maxHealthCheckRetries: number
}

// Defaults merged under any caller-supplied partial config.
const DEFAULT_SERVICE_CONFIG: ContextEngineServiceConfig = {
  binaryPath: "context-engine",
  args: [],
  port: 8000,
  host: "localhost",
  lazyStart: true,
  healthCheckInterval: 5000,
  maxHealthCheckRetries: 3,
}
/**
 * Manages the lifecycle of the Context-Engine sidecar process and exposes a
 * client for it. Emits "statusChange" on every state transition and
 * "indexComplete" when a path finishes indexing.
 */
export class ContextEngineService extends EventEmitter {
  private config: ContextEngineServiceConfig
  private logger: Logger
  private process: ChildProcess | null = null
  private client: ContextEngineClient
  private status: ContextEngineStatus = "stopped"
  private healthCheckTimer: NodeJS.Timeout | null = null
  private healthCheckFailures = 0
  // Paths currently being indexed; status returns to "ready" once this drains.
  private indexingPaths = new Set<string>()

  constructor(config: Partial<ContextEngineServiceConfig> = {}, logger: Logger) {
    super()
    this.config = { ...DEFAULT_SERVICE_CONFIG, ...config }
    this.logger = logger
    const clientConfig: Partial<ContextEngineConfig> = {
      baseUrl: `http://${this.config.host}:${this.config.port}`,
      timeout: 30000,
    }
    this.client = new ContextEngineClient(clientConfig, logger)
  }

  /** Get the current status of the Context-Engine. */
  getStatus(): ContextEngineStatus {
    return this.status
  }

  /** True when the engine can accept requests ("indexing" still serves queries). */
  isReady(): boolean {
    return this.status === "ready" || this.status === "indexing"
  }

  /** Get the Context-Engine client for making API calls. */
  getClient(): ContextEngineClient {
    return this.client
  }

  /**
   * Start the Context-Engine: reuse an already-running external instance if one
   * responds to /health, otherwise spawn the configured binary and wait for it
   * to become healthy. Returns true when the engine is ready.
   */
  async start(): Promise<boolean> {
    if (this.status === "ready" || this.status === "starting") {
      this.logger.debug("Context-Engine already started or starting")
      return true
    }
    this.setStatus("starting")
    this.logger.info({ config: this.config }, "Starting Context-Engine service")
    // First, check if an external Context-Engine is already running.
    const externalHealth = await this.client.health()
    if (externalHealth.status === "healthy") {
      this.logger.info("External Context-Engine detected and healthy")
      this.setStatus("ready")
      this.startHealthCheck()
      return true
    }
    // Try to spawn the process.
    if (!this.config.binaryPath) {
      this.logger.warn("No binary path configured for Context-Engine")
      this.setStatus("error")
      return false
    }
    try {
      const args = [
        ...(this.config.args || []),
        "--port", String(this.config.port),
        "--host", this.config.host,
      ]
      this.logger.info({ binary: this.config.binaryPath, args }, "Spawning Context-Engine process")
      this.process = spawn(this.config.binaryPath, args, {
        stdio: ["ignore", "pipe", "pipe"],
        // Windows needs a shell to resolve non-.exe launchers on PATH.
        shell: process.platform === "win32",
        detached: false,
      })
      this.process.stdout?.on("data", (data) => {
        this.logger.debug({ output: data.toString().trim() }, "Context-Engine stdout")
      })
      this.process.stderr?.on("data", (data) => {
        this.logger.debug({ output: data.toString().trim() }, "Context-Engine stderr")
      })
      this.process.on("error", (error) => {
        this.logger.error({ error }, "Context-Engine process error")
        this.setStatus("error")
      })
      this.process.on("exit", (code, signal) => {
        this.logger.info({ code, signal }, "Context-Engine process exited")
        this.process = null
        // An exit during normal operation (not an explicit stop()) is an error.
        if (this.status !== "stopped") {
          this.setStatus("error")
        }
      })
      // Wait for the process to become ready.
      const ready = await this.waitForReady(30000)
      if (ready) {
        this.setStatus("ready")
        this.startHealthCheck()
        return true
      } else {
        this.logger.error("Context-Engine failed to become ready")
        this.setStatus("error")
        return false
      }
    } catch (error) {
      this.logger.error({ error }, "Failed to spawn Context-Engine process")
      this.setStatus("error")
      return false
    }
  }

  /**
   * Stop the Context-Engine process: SIGTERM first, escalating to SIGKILL
   * after a 5 s grace period.
   */
  async stop(): Promise<void> {
    this.stopHealthCheck()
    this.setStatus("stopped")
    if (this.process) {
      this.logger.info("Stopping Context-Engine process")
      this.process.kill("SIGTERM")
      // Wait for graceful shutdown.
      await new Promise<void>((resolve) => {
        const timeout = setTimeout(() => {
          if (this.process) {
            this.logger.warn("Context-Engine did not exit gracefully, killing")
            this.process.kill("SIGKILL")
          }
          resolve()
        }, 5000)
        if (this.process) {
          this.process.once("exit", () => {
            clearTimeout(timeout)
            resolve()
          })
        } else {
          clearTimeout(timeout)
          resolve()
        }
      })
      this.process = null
    }
  }

  /**
   * Trigger indexing for a workspace path (non-blocking). Lazy-starts the
   * engine when configured; deduplicates concurrent requests per path.
   */
  async indexPath(path: string): Promise<void> {
    if (!this.config.lazyStart && !this.isReady()) {
      this.logger.debug({ path }, "Context-Engine not ready, skipping indexing")
      return
    }
    // Lazy start if needed.
    if (this.config.lazyStart && this.status === "stopped") {
      this.logger.info({ path }, "Lazy-starting Context-Engine for indexing")
      const started = await this.start()
      if (!started) {
        this.logger.warn({ path }, "Failed to start Context-Engine for indexing")
        return
      }
    }
    if (this.indexingPaths.has(path)) {
      this.logger.debug({ path }, "Path already being indexed")
      return
    }
    this.indexingPaths.add(path)
    this.setStatus("indexing")
    // Fire and forget - don't block workspace creation.
    this.client.index(path).then((response) => {
      this.indexingPaths.delete(path)
      if (response.status === "error") {
        this.logger.warn({ path, response }, "Context-Engine indexing failed")
      } else {
        this.logger.info({ path, indexed_files: response.indexed_files }, "Context-Engine indexing completed")
      }
      if (this.indexingPaths.size === 0 && this.status === "indexing") {
        this.setStatus("ready")
      }
      this.emit("indexComplete", { path, response })
    }).catch((error) => {
      this.indexingPaths.delete(path)
      this.logger.error({ path, error }, "Context-Engine indexing error")
      if (this.indexingPaths.size === 0 && this.status === "indexing") {
        this.setStatus("ready")
      }
    })
  }

  /**
   * Query the Context-Engine and format the hits as a single
   * <context_engine_retrieval> block, or null when unavailable/empty.
   */
  async query(prompt: string, contextWindow?: number): Promise<string | null> {
    if (!this.isReady()) {
      return null
    }
    try {
      const response = await this.client.query(prompt, contextWindow)
      if (response.results.length === 0) {
        return null
      }
      // Format the results as a context block. (Dropped the unused `index`
      // parameter the map callback previously declared.)
      const contextParts = response.results.map((result) => {
        return `// File: ${result.file_path} (relevance: ${(result.score * 100).toFixed(1)}%)\n${result.content}`
      })
      return `<context_engine_retrieval>\n${contextParts.join("\n\n")}\n</context_engine_retrieval>`
    } catch (error) {
      this.logger.warn({ error }, "Context-Engine query failed")
      return null
    }
  }

  // Transition helper: logs and emits only on actual changes.
  private setStatus(status: ContextEngineStatus): void {
    if (this.status !== status) {
      this.logger.info({ oldStatus: this.status, newStatus: status }, "Context-Engine status changed")
      this.status = status
      this.emit("statusChange", status)
    }
  }

  // Poll /health until healthy or the deadline passes.
  private async waitForReady(timeoutMs: number): Promise<boolean> {
    const startTime = Date.now()
    const checkInterval = 500
    while (Date.now() - startTime < timeoutMs) {
      // FIX: bail out immediately if the spawned process already died (the
      // exit/error handlers set status to "error") instead of polling a dead
      // endpoint for the full timeout.
      if (this.status === "error") {
        return false
      }
      const health = await this.client.health()
      if (health.status === "healthy") {
        return true
      }
      await new Promise((resolve) => setTimeout(resolve, checkInterval))
    }
    return false
  }

  // Periodic liveness probe; flips to "error" after maxHealthCheckRetries
  // consecutive failures and auto-recovers to "ready" when health returns.
  private startHealthCheck(): void {
    if (this.healthCheckTimer) return
    this.healthCheckTimer = setInterval(async () => {
      const health = await this.client.health()
      if (health.status === "healthy") {
        this.healthCheckFailures = 0
        if (this.status === "error") {
          this.setStatus("ready")
        }
      } else {
        this.healthCheckFailures++
        if (this.healthCheckFailures >= this.config.maxHealthCheckRetries) {
          this.logger.warn("Context-Engine health check failed multiple times")
          this.setStatus("error")
        }
      }
    }, this.config.healthCheckInterval)
  }

  private stopHealthCheck(): void {
    if (this.healthCheckTimer) {
      clearInterval(this.healthCheckTimer)
      this.healthCheckTimer = null
    }
  }
}
// Module-level singleton so the whole server process shares one engine instance.
let globalContextEngineService: ContextEngineService | null = null

/** Returns the shared service, or null when it has not been initialized yet. */
export function getContextEngineService(): ContextEngineService | null {
  return globalContextEngineService
}

/**
 * Creates the shared service on first call; later calls return the existing
 * instance unchanged (the supplied config/logger are ignored in that case).
 */
export function initializeContextEngineService(
  config: Partial<ContextEngineServiceConfig>,
  logger: Logger
): ContextEngineService {
  globalContextEngineService ??= new ContextEngineService(config, logger)
  return globalContextEngineService
}

/** Stops the shared service (if any) and clears the singleton reference. */
export async function shutdownContextEngineService(): Promise<void> {
  const service = globalContextEngineService
  if (service) {
    await service.stop()
    globalContextEngineService = null
  }
}

View File

@@ -19,6 +19,7 @@ import { createLogger } from "./logger"
import { getUserConfigPath } from "./user-data"
import { launchInBrowser } from "./launcher"
import { startReleaseMonitor } from "./releases/release-monitor"
import { initializeContextEngineService, shutdownContextEngineService } from "./context-engine"
const require = createRequire(import.meta.url)
@@ -140,6 +141,16 @@ async function main() {
logger: logger.child({ component: "instance-events" }),
})
// Initialize Context-Engine service (lazy start - starts when first workspace opens)
const contextEngineService = initializeContextEngineService(
{
lazyStart: true,
port: 8000,
host: "localhost",
},
logger.child({ component: "context-engine" })
)
const serverMeta: ServerMeta = {
httpBaseUrl: `http://${options.host}:${options.port}`,
eventsUrl: `/api/events`,
@@ -211,6 +222,13 @@ async function main() {
logger.error({ err: error }, "Workspace manager shutdown failed")
}
try {
await shutdownContextEngineService()
logger.info("Context-Engine shutdown complete")
} catch (error) {
logger.error({ err: error }, "Context-Engine shutdown failed")
}
releaseMonitor.stop()
logger.info("Exiting process")

View File

@@ -0,0 +1,680 @@
/**
* Antigravity API Integration for Binary-Free Mode
* Provides direct access to Antigravity models (Gemini, Claude, GPT-OSS) via Google OAuth
* Based on the opencode-antigravity-auth plugin: https://github.com/NoeFabris/opencode-antigravity-auth
*
* This integration enables access to:
* - Gemini 3 Pro/Flash models
* - Claude Sonnet 4.5 / Opus 4.5 (with thinking variants)
* - GPT-OSS 120B
*
* Uses Google OAuth credentials stored via the Antigravity OAuth flow
*/
import { randomUUID } from "crypto"
import { z } from "zod"
// Configuration schema for Antigravity.
// Each schema below is a zod runtime validator with a derived static type (z.infer).
export const AntigravityConfigSchema = z.object({
  enabled: z.boolean().default(true),
  // Multiple endpoints for automatic fallback (daily → autopush → prod)
  endpoints: z.array(z.string()).default([
    "https://daily-cloudcode-pa.sandbox.googleapis.com",
    "https://autopush-cloudcode-pa.sandbox.googleapis.com",
    "https://cloudcode-pa.googleapis.com"
  ]),
  apiKey: z.string().optional()
})
export type AntigravityConfig = z.infer<typeof AntigravityConfigSchema>

// Antigravity Model schema: metadata describing one selectable model.
export const AntigravityModelSchema = z.object({
  id: z.string(),
  name: z.string(),
  family: z.enum(["gemini", "claude", "gpt-oss"]).optional(),
  // Whether the model emits reasoning/thinking content.
  reasoning: z.boolean().optional(),
  tool_call: z.boolean().optional(),
  // Context/output token limits.
  limit: z.object({
    context: z.number(),
    output: z.number()
  }).optional()
})
export type AntigravityModel = z.infer<typeof AntigravityModelSchema>

// Chat message schema (OpenAI-compatible).
export const ChatMessageSchema = z.object({
  role: z.enum(["user", "assistant", "system", "tool"]),
  content: z.string().optional(),
  tool_calls: z.array(z.object({
    id: z.string(),
    type: z.literal("function"),
    function: z.object({
      name: z.string(),
      // Arguments are a JSON-encoded string, per the OpenAI wire format.
      arguments: z.string()
    })
  })).optional(),
  tool_call_id: z.string().optional()
})
export type ChatMessage = z.infer<typeof ChatMessageSchema>

// Tool Definition Schema (OpenAI function-tool shape).
export const ToolDefinitionSchema = z.object({
  type: z.literal("function"),
  function: z.object({
    name: z.string(),
    description: z.string(),
    parameters: z.object({
      type: z.literal("object"),
      // NOTE(review): z.any() disables validation of tool parameter
      // properties — consider z.unknown() or a JSON-schema validator.
      properties: z.record(z.any()),
      required: z.array(z.string()).optional()
    })
  })
})
export type ToolDefinition = z.infer<typeof ToolDefinitionSchema>

// Chat request schema.
export const ChatRequestSchema = z.object({
  model: z.string(),
  messages: z.array(ChatMessageSchema),
  stream: z.boolean().default(true),
  temperature: z.number().optional(),
  max_tokens: z.number().optional(),
  tools: z.array(ToolDefinitionSchema).optional(),
  tool_choice: z.union([
    z.literal("auto"),
    z.literal("none"),
    z.object({
      type: z.literal("function"),
      function: z.object({ name: z.string() })
    })
  ]).optional(),
  workspacePath: z.string().optional(),
  enableTools: z.boolean().optional()
})
export type ChatRequest = z.infer<typeof ChatRequestSchema>

// Chat response chunk schema (both streaming `delta` and non-streaming `message`).
export const ChatChunkSchema = z.object({
  id: z.string().optional(),
  object: z.string().optional(),
  created: z.number().optional(),
  model: z.string().optional(),
  choices: z.array(z.object({
    index: z.number(),
    delta: z.object({
      role: z.string().optional(),
      content: z.string().optional(),
      // Thinking/reasoning tokens, separate from visible content.
      reasoning_content: z.string().optional()
    }).optional(),
    message: z.object({
      role: z.string(),
      content: z.string()
    }).optional(),
    finish_reason: z.string().nullable().optional()
  }))
})
export type ChatChunk = z.infer<typeof ChatChunkSchema>
// Available Antigravity models with their specifications.
// Static catalog (no runtime discovery); `limit` values are token counts.
export const ANTIGRAVITY_MODELS: AntigravityModel[] = [
  // Gemini Models
  {
    id: "gemini-3-pro-low",
    name: "Gemini 3 Pro Low (Antigravity)",
    family: "gemini",
    reasoning: true,
    tool_call: true,
    limit: { context: 1048576, output: 65535 }
  },
  {
    id: "gemini-3-pro-high",
    name: "Gemini 3 Pro High (Antigravity)",
    family: "gemini",
    reasoning: true,
    tool_call: true,
    limit: { context: 1048576, output: 65535 }
  },
  {
    id: "gemini-3-flash",
    name: "Gemini 3 Flash (Antigravity)",
    family: "gemini",
    reasoning: false,
    tool_call: true,
    limit: { context: 1048576, output: 65536 }
  },
  // Claude Models
  {
    id: "claude-sonnet-4-5",
    name: "Claude Sonnet 4.5 (Antigravity)",
    family: "claude",
    reasoning: false,
    tool_call: true,
    limit: { context: 200000, output: 64000 }
  },
  {
    id: "claude-sonnet-4-5-thinking-low",
    name: "Claude Sonnet 4.5 Thinking Low (Antigravity)",
    family: "claude",
    reasoning: true,
    tool_call: true,
    limit: { context: 200000, output: 64000 }
  },
  {
    id: "claude-sonnet-4-5-thinking-medium",
    name: "Claude Sonnet 4.5 Thinking Medium (Antigravity)",
    family: "claude",
    reasoning: true,
    tool_call: true,
    limit: { context: 200000, output: 64000 }
  },
  {
    id: "claude-sonnet-4-5-thinking-high",
    name: "Claude Sonnet 4.5 Thinking High (Antigravity)",
    family: "claude",
    reasoning: true,
    tool_call: true,
    limit: { context: 200000, output: 64000 }
  },
  {
    id: "claude-opus-4-5",
    name: "Claude Opus 4.5 (Antigravity)",
    family: "claude",
    reasoning: false,
    tool_call: true,
    limit: { context: 200000, output: 64000 }
  },
  {
    id: "claude-opus-4-5-thinking-low",
    name: "Claude Opus 4.5 Thinking Low (Antigravity)",
    family: "claude",
    reasoning: true,
    tool_call: true,
    limit: { context: 200000, output: 64000 }
  },
  {
    id: "claude-opus-4-5-thinking-medium",
    name: "Claude Opus 4.5 Thinking Medium (Antigravity)",
    family: "claude",
    reasoning: true,
    tool_call: true,
    limit: { context: 200000, output: 64000 }
  },
  {
    id: "claude-opus-4-5-thinking-high",
    name: "Claude Opus 4.5 Thinking High (Antigravity)",
    family: "claude",
    reasoning: true,
    tool_call: true,
    limit: { context: 200000, output: 64000 }
  },
  // Other Models
  {
    id: "gpt-oss-120b-medium",
    name: "GPT-OSS 120B Medium (Antigravity)",
    family: "gpt-oss",
    reasoning: true,
    tool_call: true,
    limit: { context: 131072, output: 32768 }
  }
]
// Token storage key for Antigravity OAuth (used with window.localStorage).
const ANTIGRAVITY_TOKEN_KEY = "antigravity_oauth_token"

// Headers sent on generate/stream calls; they impersonate the Antigravity
// desktop client so the Cloud Code endpoints accept the request.
const ANTIGRAVITY_HEADERS = {
  "User-Agent": "antigravity/1.11.5 windows/amd64",
  "X-Goog-Api-Client": "google-cloud-sdk vscode_cloudshelleditor/0.1",
  "Client-Metadata": "{\"ideType\":\"IDE_UNSPECIFIED\",\"platform\":\"PLATFORM_UNSPECIFIED\",\"pluginType\":\"GEMINI\"}",
} as const

// Headers for the loadCodeAssist project-discovery endpoint; note the
// different User-Agent but shared Client-Metadata.
const LOAD_ASSIST_HEADERS = {
  "User-Agent": "google-api-nodejs-client/9.15.1",
  "X-Goog-Api-Client": "google-cloud-sdk vscode_cloudshelleditor/0.1",
  "Client-Metadata": ANTIGRAVITY_HEADERS["Client-Metadata"],
} as const

// Fallback GCP project id when loadCodeAssist cannot resolve one;
// overridable via the ANTIGRAVITY_PROJECT_ID environment variable.
const DEFAULT_PROJECT_ID = process.env.ANTIGRAVITY_PROJECT_ID || "rising-fact-p41fc"

// Structured form of the Client-Metadata JSON above, sent in loadCodeAssist
// request bodies (see resolveProjectId / testConnection).
const LOAD_ASSIST_METADATA = {
  ideType: "IDE_UNSPECIFIED",
  platform: "PLATFORM_UNSPECIFIED",
  pluginType: "GEMINI"
} as const

// Endpoints tried (in order) for loadCodeAssist project discovery.
const LOAD_ENDPOINTS = [
  "https://cloudcode-pa.googleapis.com",
  "https://daily-cloudcode-pa.sandbox.googleapis.com",
  "https://autopush-cloudcode-pa.sandbox.googleapis.com"
] as const

// v1internal action names.
// NOTE(review): STREAM_ACTION is unused in this file's visible code
// (chatStream buffers via GENERATE_ACTION) — confirm before removing.
const STREAM_ACTION = "streamGenerateContent"
const GENERATE_ACTION = "generateContent"
/**
 * OAuth token persisted in localStorage under ANTIGRAVITY_TOKEN_KEY.
 */
export interface AntigravityToken {
  access_token: string
  refresh_token?: string
  // Lifetime in seconds, relative to created_at.
  expires_in: number
  // Creation timestamp; isTokenValid() accepts either seconds or
  // milliseconds (values > 1e12 are treated as ms).
  created_at: number
  project_id?: string
}
/**
 * Produce a GCP-style "adjective-noun-xxxxx" placeholder project id,
 * used when no real project id can be resolved for the caller.
 */
function generateSyntheticProjectId(): string {
  const pick = (words: string[]): string => words[Math.floor(Math.random() * words.length)]
  const ADJECTIVES = ["useful", "bright", "swift", "calm", "bold"]
  const NOUNS = ["fuze", "wave", "spark", "flow", "core"]
  // First five UUID characters are lowercase hex, mirroring GCP id suffixes.
  const suffix = randomUUID().slice(0, 5).toLowerCase()
  return [pick(ADJECTIVES), pick(NOUNS), suffix].join("-")
}
/**
 * Merge every string-valued system message into a single instruction blob
 * (double-newline separated). Returns undefined when nothing remains after
 * trimming, so callers can omit the systemInstruction field entirely.
 */
function collectSystemInstruction(messages: ChatMessage[]): string | undefined {
  const combined = messages
    .filter((msg) => msg.role === "system" && typeof msg.content === "string")
    .map((msg) => msg.content as string)
    .join("\n\n")
    .trim()
  return combined.length > 0 ? combined : undefined
}
/**
 * Map OpenAI-style chat messages onto Gemini "contents".
 * System messages are excluded (they go into systemInstruction), messages
 * with empty/missing content are dropped, assistant maps to "model", and
 * tool results are folded into user turns with a "Tool result:" prefix.
 */
function buildContents(messages: ChatMessage[]): Array<{ role: "user" | "model"; parts: Array<{ text: string }> }> {
  const result: Array<{ role: "user" | "model"; parts: Array<{ text: string }> }> = []
  for (const msg of messages) {
    if (msg.role === "system" || !msg.content) continue
    const text = msg.role === "tool" ? `Tool result:\n${msg.content}` : `${msg.content}`
    result.push({
      role: msg.role === "assistant" ? "model" : "user",
      parts: [{ text }]
    })
  }
  return result
}
/**
 * Pull the concatenated text parts out of the FIRST candidate of a Gemini
 * generateContent response. Returns "" for any unexpected/missing shape;
 * non-string part.text values contribute nothing.
 */
function extractTextFromResponse(payload: any): string {
  const firstCandidate = Array.isArray(payload?.candidates) ? payload.candidates[0] : undefined
  const parts = firstCandidate?.content?.parts
  if (!Array.isArray(parts)) return ""
  let text = ""
  for (const part of parts) {
    if (typeof part?.text === "string") text += part.text
  }
  return text
}
/**
 * Client for Google's Cloud Code "Antigravity" endpoints.
 *
 * Handles OAuth/API-key auth, project-id discovery via loadCodeAssist,
 * and generateContent requests with round-robin endpoint fallback.
 * Models are served from a static list (ANTIGRAVITY_MODELS) with a
 * short-lived cache.
 */
export class AntigravityClient {
  private config: AntigravityConfig
  // Index into config.endpoints; advanced by rotateEndpoint() on failure.
  private currentEndpointIndex: number = 0
  private modelsCache: AntigravityModel[] | null = null
  private modelsCacheTime: number = 0
  // Resolved GCP project id, cached across requests.
  private projectIdCache: string | null = null
  private readonly CACHE_TTL_MS = 5 * 60 * 1000 // 5 minutes

  constructor(config?: Partial<AntigravityConfig>) {
    this.config = AntigravityConfigSchema.parse(config || {})
  }

  /**
   * Get the current active endpoint with automatic fallback
   */
  private getEndpoint(): string {
    const endpoints = this.config.endpoints
    return endpoints[this.currentEndpointIndex] || endpoints[0]
  }

  /**
   * Rotate to next endpoint on failure
   */
  private rotateEndpoint(): void {
    this.currentEndpointIndex = (this.currentEndpointIndex + 1) % this.config.endpoints.length
    console.log(`Antigravity: Rotating to endpoint ${this.getEndpoint()}`)
  }

  /**
   * Get stored OAuth token from localStorage (browser context).
   * Returns null in non-browser contexts or on parse failure.
   */
  getStoredToken(): AntigravityToken | null {
    if (typeof window === "undefined") return null
    try {
      const raw = window.localStorage.getItem(ANTIGRAVITY_TOKEN_KEY)
      if (!raw) return null
      return JSON.parse(raw)
    } catch {
      return null
    }
  }

  /**
   * Check if the stored token is still valid.
   * NOTE(review): created_at > 1e12 is heuristically treated as
   * milliseconds and converted to seconds — confirm all token writers
   * agree on the unit so this normalization never misfires.
   */
  isTokenValid(token: AntigravityToken | null): boolean {
    if (!token) return false
    const createdAt = token.created_at > 1e12 ? Math.floor(token.created_at / 1000) : token.created_at
    const expiresAt = (createdAt + token.expires_in) * 1000 - 300000 // 5 min buffer
    return Date.now() < expiresAt
  }

  /**
   * Get authorization headers for API requests.
   * Precedence: explicit accessToken arg > valid stored OAuth token >
   * configured API key. Without any of those, no Authorization header is set.
   */
  private getAuthHeaders(accessToken?: string): Record<string, string> {
    const headers: Record<string, string> = {
      "Content-Type": "application/json",
      "User-Agent": ANTIGRAVITY_HEADERS["User-Agent"],
      "X-Goog-Api-Client": ANTIGRAVITY_HEADERS["X-Goog-Api-Client"],
      "Client-Metadata": ANTIGRAVITY_HEADERS["Client-Metadata"],
    }
    if (accessToken) {
      headers["Authorization"] = `Bearer ${accessToken}`
      return headers
    }
    // Try OAuth token first
    const token = this.getStoredToken()
    if (token && this.isTokenValid(token)) {
      headers["Authorization"] = `Bearer ${token.access_token}`
    } else if (this.config.apiKey) {
      headers["Authorization"] = `Bearer ${this.config.apiKey}`
    }
    return headers
  }

  // Headers for the loadCodeAssist discovery endpoint (different UA).
  private getLoadHeaders(accessToken: string): Record<string, string> {
    return {
      "Content-Type": "application/json",
      "User-Agent": LOAD_ASSIST_HEADERS["User-Agent"],
      "X-Goog-Api-Client": LOAD_ASSIST_HEADERS["X-Goog-Api-Client"],
      "Client-Metadata": LOAD_ASSIST_HEADERS["Client-Metadata"],
      Authorization: `Bearer ${accessToken}`,
    }
  }

  /**
   * Check if the client is authenticated
   */
  isAuthenticated(accessToken?: string): boolean {
    if (accessToken) return true
    const token = this.getStoredToken()
    return this.isTokenValid(token) || Boolean(this.config.apiKey)
  }

  /**
   * Resolve the GCP project id to bill requests against.
   * Tries loadCodeAssist on every known endpoint (optionally pinning
   * projectIdOverride via duetProject, then DEFAULT_PROJECT_ID), falling
   * back to the override / default / a synthetic id. The result is cached.
   */
  private async resolveProjectId(accessToken: string | undefined, projectIdOverride?: string): Promise<string> {
    const requestedProjectId = projectIdOverride?.trim()
    // Cache hit only applies when the caller did not pin a specific project.
    if (this.projectIdCache && !requestedProjectId) return this.projectIdCache
    if (!accessToken) {
      const fallback = requestedProjectId || generateSyntheticProjectId()
      if (requestedProjectId) {
        this.projectIdCache = requestedProjectId
      }
      return fallback
    }
    const loadEndpoints = Array.from(new Set([...LOAD_ENDPOINTS, ...this.config.endpoints]))
    const tryLoad = async (metadata: Record<string, string>): Promise<string | null> => {
      for (const endpoint of loadEndpoints) {
        try {
          const response = await fetch(`${endpoint}/v1internal:loadCodeAssist`, {
            method: "POST",
            headers: this.getLoadHeaders(accessToken),
            body: JSON.stringify({ metadata }),
            signal: AbortSignal.timeout(10000),
          })
          if (!response.ok) continue
          const data = await response.json() as any
          // Response shape varies; accept several known locations for the id.
          const projectId =
            data?.cloudaicompanionProject?.id ||
            data?.cloudaicompanionProject ||
            data?.projectId
          if (typeof projectId === "string" && projectId.length > 0) {
            return projectId
          }
        } catch {
          continue
        }
      }
      return null
    }
    let resolvedProjectId: string | null = null
    const baseMetadata: Record<string, string> = { ...LOAD_ASSIST_METADATA }
    if (requestedProjectId) {
      baseMetadata.duetProject = requestedProjectId
      resolvedProjectId = await tryLoad(baseMetadata)
    } else {
      resolvedProjectId = await tryLoad(baseMetadata)
      if (!resolvedProjectId) {
        const fallbackMetadata: Record<string, string> = {
          ...LOAD_ASSIST_METADATA,
          duetProject: DEFAULT_PROJECT_ID,
        }
        resolvedProjectId = await tryLoad(fallbackMetadata)
      }
    }
    const fallbackProjectId = requestedProjectId || DEFAULT_PROJECT_ID
    const finalProjectId = resolvedProjectId || fallbackProjectId
    this.projectIdCache = finalProjectId
    return finalProjectId
  }

  // Same auth precedence as getAuthHeaders, but returns the raw token string.
  private resolveAccessToken(accessToken?: string): string | null {
    if (accessToken) return accessToken
    const token = this.getStoredToken()
    if (token && this.isTokenValid(token)) {
      return token.access_token
    }
    if (this.config.apiKey) {
      return this.config.apiKey
    }
    return null
  }

  /**
   * Perform a (non-streaming) generateContent call and return the extracted
   * text. Retries across all configured endpoints, rotating on each failure;
   * 401/403 aborts immediately instead of rotating.
   */
  private async requestGenerateContent(request: ChatRequest, accessToken?: string, projectIdOverride?: string): Promise<string> {
    const authToken = this.resolveAccessToken(accessToken)
    if (!authToken) {
      throw new Error("Antigravity: Missing access token.")
    }
    const projectId = await this.resolveProjectId(authToken, projectIdOverride)
    const systemInstruction = collectSystemInstruction(request.messages)
    const contents = buildContents(request.messages)
    const generationConfig: Record<string, unknown> = {}
    if (typeof request.temperature === "number") {
      generationConfig.temperature = request.temperature
    }
    if (typeof request.max_tokens === "number") {
      generationConfig.maxOutputTokens = request.max_tokens
    }
    const requestPayload: Record<string, unknown> = { contents }
    if (systemInstruction) {
      requestPayload.systemInstruction = { parts: [{ text: systemInstruction }] }
    }
    if (Object.keys(generationConfig).length > 0) {
      requestPayload.generationConfig = generationConfig
    }
    const body = JSON.stringify({
      project: projectId,
      model: request.model,
      request: requestPayload,
      userAgent: "antigravity",
      requestId: `agent-${randomUUID()}`
    })
    let lastError: Error | null = null
    const maxRetries = this.config.endpoints.length
    for (let retry = 0; retry < maxRetries; retry++) {
      try {
        const endpoint = this.getEndpoint()
        const response = await fetch(`${endpoint}/v1internal:${GENERATE_ACTION}`, {
          method: "POST",
          headers: this.getAuthHeaders(authToken),
          body,
          signal: AbortSignal.timeout(120000)
        })
        if (!response.ok) {
          const errorText = await response.text()
          if (response.status === 401 || response.status === 403) {
            throw new Error(`Antigravity authentication failed: ${errorText}`)
          }
          this.rotateEndpoint()
          lastError = new Error(`Antigravity API error (${response.status}): ${errorText}`)
          continue
        }
        const data = await response.json()
        return extractTextFromResponse(data)
      } catch (error) {
        lastError = error instanceof Error ? error : new Error(String(error))
        // Re-throw the auth error raised above so it is not retried.
        // NOTE(review): matching on the message substring "authentication"
        // is fragile — a typed error class would be safer.
        if (error instanceof Error && error.message.includes("authentication")) {
          throw error
        }
        this.rotateEndpoint()
      }
    }
    throw lastError || new Error("Antigravity: All endpoints failed")
  }

  /**
   * Get available Antigravity models.
   * NOTE(review): accessToken is currently unused — the full static list
   * is returned regardless so models appear in selectors; authentication
   * is checked during actual chat requests.
   */
  async getModels(accessToken?: string): Promise<AntigravityModel[]> {
    // Return full model list even if not authenticated, so they appear in selectors
    // Authentication is checked during actual chat requests
    const now = Date.now()
    if (this.modelsCache && (now - this.modelsCacheTime) < this.CACHE_TTL_MS) {
      return this.modelsCache
    }
    // Refresh the cache with the static model list.
    this.modelsCache = ANTIGRAVITY_MODELS
    this.modelsCacheTime = now
    return ANTIGRAVITY_MODELS
  }

  /**
   * Test connection to Antigravity API by hitting loadCodeAssist on every
   * known endpoint (optionally pinning a project, then DEFAULT_PROJECT_ID).
   * Reports the last failing status/body when nothing succeeds.
   */
  async testConnection(accessToken?: string, projectIdOverride?: string): Promise<{ connected: boolean; error?: string; status?: number }> {
    if (!this.isAuthenticated(accessToken)) {
      return { connected: false, error: "Not authenticated" }
    }
    try {
      const authToken = this.resolveAccessToken(accessToken)
      if (!authToken) {
        return { connected: false, error: "Not authenticated" }
      }
      const requestedProjectId = projectIdOverride?.trim()
      const loadEndpoints = Array.from(new Set([...LOAD_ENDPOINTS, ...this.config.endpoints]))
      let lastErrorText = ""
      let lastStatus: number | undefined
      const tryLoad = async (metadata: Record<string, string>): Promise<boolean> => {
        for (const endpoint of loadEndpoints) {
          const response = await fetch(`${endpoint}/v1internal:loadCodeAssist`, {
            method: "POST",
            headers: this.getLoadHeaders(authToken),
            body: JSON.stringify({ metadata }),
            signal: AbortSignal.timeout(10000)
          })
          if (response.ok) {
            return true
          }
          lastStatus = response.status
          lastErrorText = await response.text().catch(() => "") || response.statusText
        }
        return false
      }
      const baseMetadata: Record<string, string> = { ...LOAD_ASSIST_METADATA }
      if (requestedProjectId) {
        baseMetadata.duetProject = requestedProjectId
      }
      let success = await tryLoad(baseMetadata)
      if (!success && !requestedProjectId) {
        const fallbackMetadata: Record<string, string> = {
          ...LOAD_ASSIST_METADATA,
          duetProject: DEFAULT_PROJECT_ID,
        }
        success = await tryLoad(fallbackMetadata)
      }
      if (success) {
        return { connected: true }
      }
      return {
        connected: false,
        status: lastStatus,
        error: lastErrorText || "Connection test failed"
      }
    } catch (error) {
      console.warn("Antigravity connection test failed:", error)
      return { connected: false, error: error instanceof Error ? error.message : String(error) }
    }
  }

  /**
   * Chat completion (streaming) with automatic endpoint fallback.
   * NOTE(review): this is pseudo-streaming — the full response is buffered
   * via generateContent and yielded as a single chunk.
   */
  async *chatStream(request: ChatRequest, accessToken?: string, projectIdOverride?: string): AsyncGenerator<ChatChunk> {
    if (!this.isAuthenticated(accessToken)) {
      throw new Error("Antigravity: Not authenticated. Please sign in with Google OAuth.")
    }
    const content = await this.requestGenerateContent(request, accessToken, projectIdOverride)
    yield {
      id: randomUUID(),
      choices: [
        {
          index: 0,
          delta: { content },
          finish_reason: "stop"
        }
      ]
    }
  }

  /**
   * Chat completion (non-streaming)
   */
  async chat(request: ChatRequest, accessToken?: string, projectIdOverride?: string): Promise<ChatChunk> {
    if (!this.isAuthenticated(accessToken)) {
      throw new Error("Antigravity: Not authenticated. Please sign in with Google OAuth.")
    }
    const content = await this.requestGenerateContent(request, accessToken, projectIdOverride)
    return {
      id: randomUUID(),
      choices: [
        {
          index: 0,
          message: {
            role: "assistant",
            content
          },
          finish_reason: "stop"
        }
      ]
    }
  }
}
/**
 * Default client configuration. Endpoint order matters: the client tries
 * index 0 first and rotates forward on failure, so the sandbox builds are
 * preferred with production as the final fallback.
 */
export function getDefaultAntigravityConfig(): AntigravityConfig {
  const endpoints = [
    "https://daily-cloudcode-pa.sandbox.googleapis.com",
    "https://autopush-cloudcode-pa.sandbox.googleapis.com",
    "https://cloudcode-pa.googleapis.com"
  ]
  return { enabled: true, endpoints }
}

View File

@@ -1,4 +1,5 @@
import { z } from "zod"
import { getContextEngineService } from "../context-engine"
export const OllamaCloudConfigSchema = z.object({
apiKey: z.string().optional(),
@@ -208,11 +209,41 @@ export class OllamaCloudClient {
headers["Authorization"] = `Bearer ${this.config.apiKey}`
}
// Inject Context-Engine RAG context if available
let enrichedRequest = request
try {
const contextEngine = getContextEngineService()
if (contextEngine?.isReady()) {
// Get the last user message for context retrieval
const lastUserMessage = [...request.messages].reverse().find(m => m.role === "user")
if (lastUserMessage?.content) {
const contextBlock = await contextEngine.query(lastUserMessage.content, 4096)
if (contextBlock) {
// Clone messages and inject context into the last user message
const messagesWithContext = request.messages.map((msg, index) => {
if (msg === lastUserMessage) {
return {
...msg,
content: `${contextBlock}\n\n${msg.content}`
}
}
return msg
})
enrichedRequest = { ...request, messages: messagesWithContext }
console.log("[OllamaCloud] Context-Engine context injected")
}
}
}
} catch (contextError) {
// Graceful fallback - continue without context if Context-Engine fails
console.warn("[OllamaCloud] Context-Engine query failed, continuing without RAG context:", contextError)
}
try {
const response = await this.makeRequest("/chat", {
method: "POST",
headers,
body: JSON.stringify(request)
body: JSON.stringify(enrichedRequest)
})
if (!response.ok) {

View File

@@ -42,19 +42,55 @@ export type ZenModel = z.infer<typeof ZenModelSchema>
// Chat message schema (OpenAI-compatible)
export const ChatMessageSchema = z.object({
role: z.enum(["user", "assistant", "system"]),
content: z.string()
role: z.enum(["user", "assistant", "system", "tool"]),
content: z.string().optional(),
tool_calls: z.array(z.object({
id: z.string(),
type: z.literal("function"),
function: z.object({
name: z.string(),
arguments: z.string()
})
})).optional(),
tool_call_id: z.string().optional()
})
export type ChatMessage = z.infer<typeof ChatMessageSchema>
// Chat request schema
// Tool Definition Schema
export const ToolDefinitionSchema = z.object({
type: z.literal("function"),
function: z.object({
name: z.string(),
description: z.string(),
parameters: z.object({
type: z.literal("object"),
properties: z.record(z.any()),
required: z.array(z.string()).optional()
})
})
})
export type ToolDefinition = z.infer<typeof ToolDefinitionSchema>
export const ChatRequestSchema = z.object({
model: z.string(),
messages: z.array(ChatMessageSchema),
stream: z.boolean().default(true),
temperature: z.number().optional(),
max_tokens: z.number().optional()
max_tokens: z.number().optional(),
tools: z.array(ToolDefinitionSchema).optional(),
tool_choice: z.union([
z.literal("auto"),
z.literal("none"),
z.object({
type: z.literal("function"),
function: z.object({ name: z.string() })
})
]).optional(),
workspacePath: z.string().optional(),
enableTools: z.boolean().optional()
})
export type ChatRequest = z.infer<typeof ChatRequestSchema>

View File

@@ -1,8 +1,7 @@
import { z } from "zod"
export const ZAIConfigSchema = z.object({
apiKey: z.string().optional(),
endpoint: z.string().default("https://api.z.ai/api/paas/v4"),
endpoint: z.string().default("https://api.z.ai/api"),
enabled: z.boolean().default(false),
timeout: z.number().default(300000)
})
@@ -10,18 +9,55 @@ export const ZAIConfigSchema = z.object({
export type ZAIConfig = z.infer<typeof ZAIConfigSchema>
export const ZAIMessageSchema = z.object({
role: z.enum(["user", "assistant", "system"]),
content: z.string()
role: z.enum(["user", "assistant", "system", "tool"]),
content: z.string().optional(),
tool_calls: z.array(z.object({
id: z.string(),
type: z.literal("function"),
function: z.object({
name: z.string(),
arguments: z.string()
})
})).optional(),
tool_call_id: z.string().optional()
})
export type ZAIMessage = z.infer<typeof ZAIMessageSchema>
// Tool Definition Schema (OpenAI-compatible)
export const ZAIToolSchema = z.object({
type: z.literal("function"),
function: z.object({
name: z.string(),
description: z.string(),
parameters: z.object({
type: z.literal("object"),
properties: z.record(z.object({
type: z.string(),
description: z.string().optional()
})),
required: z.array(z.string()).optional()
})
})
})
export type ZAITool = z.infer<typeof ZAIToolSchema>
export const ZAIChatRequestSchema = z.object({
model: z.string().default("glm-4.7"),
messages: z.array(ZAIMessageSchema),
max_tokens: z.number().default(8192),
stream: z.boolean().default(true),
temperature: z.number().optional(),
tools: z.array(ZAIToolSchema).optional(),
tool_choice: z.union([
z.literal("auto"),
z.literal("none"),
z.object({
type: z.literal("function"),
function: z.object({ name: z.string() })
})
]).optional(),
thinking: z.object({
type: z.enum(["enabled", "disabled"]).optional()
}).optional()
@@ -38,8 +74,16 @@ export const ZAIChatResponseSchema = z.object({
index: z.number(),
message: z.object({
role: z.string(),
content: z.string().optional(),
reasoning_content: z.string().optional()
content: z.string().optional().nullable(),
reasoning_content: z.string().optional(),
tool_calls: z.array(z.object({
id: z.string(),
type: z.literal("function"),
function: z.object({
name: z.string(),
arguments: z.string()
})
})).optional()
}),
finish_reason: z.string()
})),
@@ -61,8 +105,17 @@ export const ZAIStreamChunkSchema = z.object({
index: z.number(),
delta: z.object({
role: z.string().optional(),
content: z.string().optional(),
reasoning_content: z.string().optional()
content: z.string().optional().nullable(),
reasoning_content: z.string().optional(),
tool_calls: z.array(z.object({
index: z.number().optional(),
id: z.string().optional(),
type: z.literal("function").optional(),
function: z.object({
name: z.string().optional(),
arguments: z.string().optional()
}).optional()
})).optional()
}),
finish_reason: z.string().nullable().optional()
}))
@@ -87,7 +140,8 @@ export class ZAIClient {
constructor(config: ZAIConfig) {
this.config = config
this.baseUrl = config.endpoint.replace(/\/$/, "")
const trimmed = config.endpoint.replace(/\/$/, "")
this.baseUrl = trimmed.replace(/\/(?:api\/coding\/)?paas\/v4$/, "")
}
async testConnection(): Promise<boolean> {
@@ -96,7 +150,7 @@ export class ZAIClient {
}
try {
const response = await fetch(`${this.baseUrl}/chat/completions`, {
const response = await fetch(`${this.baseUrl}/paas/v4/chat/completions`, {
method: "POST",
headers: this.getHeaders(),
body: JSON.stringify({
@@ -106,7 +160,12 @@ export class ZAIClient {
})
})
return response.status !== 401 && response.status !== 403
if (!response.ok) {
const text = await response.text()
console.error(`Z.AI connection failed (${response.status}): ${text}`)
}
return response.ok
} catch (error) {
console.error("Z.AI connection test failed:", error)
return false
@@ -122,7 +181,7 @@ export class ZAIClient {
throw new Error("Z.AI API key is required")
}
const response = await fetch(`${this.baseUrl}/chat/completions`, {
const response = await fetch(`${this.baseUrl}/paas/v4/chat/completions`, {
method: "POST",
headers: this.getHeaders(),
body: JSON.stringify({
@@ -176,7 +235,7 @@ export class ZAIClient {
throw new Error("Z.AI API key is required")
}
const response = await fetch(`${this.baseUrl}/chat/completions`, {
const response = await fetch(`${this.baseUrl}/paas/v4/chat/completions`, {
method: "POST",
headers: this.getHeaders(),
body: JSON.stringify({
@@ -196,7 +255,7 @@ export class ZAIClient {
private getHeaders(): Record<string, string> {
return {
"Content-Type": "application/json",
"Authorization": `Bearer ${this.config.apiKey}`
"Authorization": `Bearer ${this.config.apiKey!}`
}
}

View File

@@ -0,0 +1,532 @@
/**
* MCP Client - Connects to MCP (Model Context Protocol) servers
* and provides tool discovery and execution capabilities.
*
* Supports:
* - stdio-based MCP servers (command + args)
* - HTTP/SSE-based remote MCP servers
*/
import { spawn, ChildProcess } from "child_process"
import { createLogger } from "../logger"
import path from "path"
// Component-scoped logger for all MCP client/manager messages.
const log = createLogger({ component: "mcp-client" })

// MCP Protocol Types

/**
 * Configuration for a single MCP server as read from .mcp.json.
 * Stdio servers set command/args/env; remote servers set type + url/headers.
 */
export interface McpServerConfig {
  command?: string
  args?: string[]
  env?: Record<string, string>
  // NOTE(review): "sse" and "streamable-http" are accepted here but the
  // client only special-cases "remote"/"http" in listTools/executeTool —
  // confirm how those transports are meant to be handled.
  type?: "stdio" | "remote" | "http" | "sse" | "streamable-http"
  url?: string
  headers?: Record<string, string>
}

/** A tool advertised by an MCP server (JSON-Schema-shaped input). */
export interface McpToolDefinition {
  name: string
  description: string
  inputSchema: {
    type: "object"
    properties: Record<string, { type: string; description?: string }>
    required?: string[]
  }
}

/** A request to invoke a named tool with JSON arguments. */
export interface McpToolCall {
  name: string
  arguments: Record<string, unknown>
}

/** Result of a tool invocation: a list of typed content parts. */
export interface McpToolResult {
  content: Array<{
    type: "text" | "image" | "resource"
    text?: string
    data?: string
    mimeType?: string
  }>
  isError?: boolean
}

// MCP JSON-RPC Message Types

// Outgoing JSON-RPC 2.0 request frame.
interface JsonRpcRequest {
  jsonrpc: "2.0"
  id: number | string
  method: string
  params?: unknown
}

// Incoming JSON-RPC 2.0 response frame (result XOR error).
interface JsonRpcResponse {
  jsonrpc: "2.0"
  id: number | string
  result?: unknown
  error?: { code: number; message: string; data?: unknown }
}
/**
* MCP Client for a single server
*/
/**
 * MCP Client for a single server.
 *
 * For stdio servers it spawns the configured command and speaks
 * newline-delimited JSON-RPC 2.0 over stdin/stdout; for remote/http servers
 * it POSTs JSON-RPC frames to `${url}/tools/list` and `${url}/tools/call`.
 */
export class McpClient {
  private config: McpServerConfig
  private process: ChildProcess | null = null
  // Monotonic JSON-RPC id counter.
  private messageId = 0
  // Requests awaiting a response, keyed by JSON-RPC id.
  private pendingRequests: Map<number | string, {
    resolve: (value: unknown) => void
    reject: (reason: unknown) => void
  }> = new Map()
  // Partial-line buffer for stdout framing (see handleData).
  private buffer = ""
  private tools: McpToolDefinition[] = []
  private connected = false
  private serverName: string

  constructor(serverName: string, config: McpServerConfig) {
    this.serverName = serverName
    this.config = config
  }

  /**
   * Start and connect to the MCP server.
   * Idempotent: returns immediately when already connected.
   */
  async connect(): Promise<void> {
    if (this.connected) return
    if (this.config.type === "remote" || this.config.type === "http" || this.config.type === "sse") {
      // HTTP-based server - just mark as connected
      this.connected = true
      log.info({ server: this.serverName, type: this.config.type }, "Connected to remote MCP server")
      return
    }
    // Stdio-based server
    if (!this.config.command) {
      throw new Error(`MCP server ${this.serverName} has no command configured`)
    }
    log.info({ server: this.serverName, command: this.config.command, args: this.config.args }, "Starting MCP server")
    // shell: true allows commands like "npx ..." to resolve via the shell.
    this.process = spawn(this.config.command, this.config.args || [], {
      stdio: ["pipe", "pipe", "pipe"],
      env: { ...process.env, ...this.config.env },
      shell: true
    })
    this.process.stdout?.on("data", (data) => this.handleData(data.toString()))
    this.process.stderr?.on("data", (data) => log.warn({ server: this.serverName }, `MCP stderr: ${data}`))
    this.process.on("error", (err) => log.error({ server: this.serverName, error: err }, "MCP process error"))
    this.process.on("exit", (code) => {
      log.info({ server: this.serverName, code }, "MCP process exited")
      this.connected = false
    })
    // Wait for process to start
    await new Promise(resolve => setTimeout(resolve, 500))
    // Initialize the server
    try {
      await this.sendRequest("initialize", {
        protocolVersion: "2024-11-05",
        capabilities: { tools: {} },
        clientInfo: { name: "NomadArch", version: "0.4.0" }
      })
      // NOTE(review): JSON-RPC notifications carry no id and receive no
      // response; awaiting sendRequest here may only resolve via the 30s
      // timeout with spec-compliant servers — confirm against real servers.
      await this.sendRequest("notifications/initialized", {})
      this.connected = true
      log.info({ server: this.serverName }, "MCP server initialized")
    } catch (error) {
      log.error({ server: this.serverName, error }, "Failed to initialize MCP server")
      this.disconnect()
      throw error
    }
  }

  /**
   * Disconnect from the MCP server and drop all pending state.
   * NOTE(review): pending requests are cleared without rejection, so their
   * callers resolve only via the per-request timeout — confirm intended.
   */
  disconnect(): void {
    if (this.process) {
      this.process.kill()
      this.process = null
    }
    this.connected = false
    this.tools = []
    this.pendingRequests.clear()
  }

  /**
   * List available tools from this MCP server.
   * Connects lazily; returns [] on failure rather than throwing.
   */
  async listTools(): Promise<McpToolDefinition[]> {
    if (!this.connected) {
      await this.connect()
    }
    if (this.config.type === "remote" || this.config.type === "http") {
      // For HTTP servers, fetch tools via HTTP
      return this.fetchToolsHttp()
    }
    try {
      const response = await this.sendRequest("tools/list", {}) as { tools?: McpToolDefinition[] }
      this.tools = response.tools || []
      return this.tools
    } catch (error) {
      log.error({ server: this.serverName, error }, "Failed to list MCP tools")
      return []
    }
  }

  /**
   * Execute a tool on this MCP server.
   * Failures are converted into an isError result instead of throwing.
   */
  async executeTool(name: string, args: Record<string, unknown>): Promise<McpToolResult> {
    if (!this.connected) {
      await this.connect()
    }
    log.info({ server: this.serverName, tool: name, args }, "Executing MCP tool")
    if (this.config.type === "remote" || this.config.type === "http") {
      return this.executeToolHttp(name, args)
    }
    try {
      const response = await this.sendRequest("tools/call", { name, arguments: args }) as McpToolResult
      return response
    } catch (error) {
      log.error({ server: this.serverName, tool: name, error }, "MCP tool execution failed")
      return {
        content: [{ type: "text", text: `Error: ${error instanceof Error ? error.message : String(error)}` }],
        isError: true
      }
    }
  }

  /**
   * Send a JSON-RPC request to the MCP server over stdin.
   * Resolves/rejects when a matching-id response arrives via handleData,
   * or rejects after a 30s timeout.
   */
  private async sendRequest(method: string, params?: unknown): Promise<unknown> {
    if (!this.process?.stdin) {
      throw new Error("MCP server not running")
    }
    const id = ++this.messageId
    const request: JsonRpcRequest = {
      jsonrpc: "2.0",
      id,
      method,
      params
    }
    return new Promise((resolve, reject) => {
      this.pendingRequests.set(id, { resolve, reject })
      // Newline-delimited framing: one JSON document per line.
      const message = JSON.stringify(request) + "\n"
      this.process!.stdin!.write(message)
      // Timeout after 30 seconds
      setTimeout(() => {
        if (this.pendingRequests.has(id)) {
          this.pendingRequests.delete(id)
          reject(new Error(`MCP request timeout: ${method}`))
        }
      }, 30000)
    })
  }

  /**
   * Handle incoming data from the MCP server.
   * Accumulates partial lines in this.buffer and dispatches each complete
   * newline-terminated JSON-RPC response to its pending request.
   */
  private handleData(data: string): void {
    this.buffer += data
    const lines = this.buffer.split("\n")
    // The last element is an (possibly empty) incomplete line; keep it.
    this.buffer = lines.pop() || ""
    for (const line of lines) {
      if (!line.trim()) continue
      try {
        const message = JSON.parse(line) as JsonRpcResponse
        if (message.id !== undefined && this.pendingRequests.has(message.id)) {
          const pending = this.pendingRequests.get(message.id)!
          this.pendingRequests.delete(message.id)
          if (message.error) {
            pending.reject(new Error(message.error.message))
          } else {
            pending.resolve(message.result)
          }
        }
      } catch (e) {
        log.warn({ server: this.serverName }, `Failed to parse MCP message: ${line}`)
      }
    }
  }

  /**
   * Fetch tools from HTTP-based MCP server.
   * Returns [] when no URL is configured or the request fails.
   */
  private async fetchToolsHttp(): Promise<McpToolDefinition[]> {
    if (!this.config.url) return []
    try {
      const response = await fetch(`${this.config.url}/tools/list`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          ...this.config.headers
        },
        body: JSON.stringify({ jsonrpc: "2.0", id: 1, method: "tools/list", params: {} })
      })
      if (!response.ok) {
        throw new Error(`HTTP ${response.status}`)
      }
      const data = await response.json() as JsonRpcResponse
      const result = data.result as { tools?: McpToolDefinition[] }
      return result.tools || []
    } catch (error) {
      log.error({ server: this.serverName, error }, "Failed to fetch HTTP MCP tools")
      return []
    }
  }

  /**
   * Execute tool on HTTP-based MCP server.
   * Errors are returned as an isError result rather than thrown.
   */
  private async executeToolHttp(name: string, args: Record<string, unknown>): Promise<McpToolResult> {
    if (!this.config.url) {
      return { content: [{ type: "text", text: "No URL configured" }], isError: true }
    }
    try {
      const response = await fetch(`${this.config.url}/tools/call`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          ...this.config.headers
        },
        body: JSON.stringify({
          jsonrpc: "2.0",
          id: 1,
          method: "tools/call",
          params: { name, arguments: args }
        })
      })
      if (!response.ok) {
        throw new Error(`HTTP ${response.status}`)
      }
      const data = await response.json() as JsonRpcResponse
      return data.result as McpToolResult
    } catch (error) {
      return {
        content: [{ type: "text", text: `HTTP error: ${error instanceof Error ? error.message : String(error)}` }],
        isError: true
      }
    }
  }

  // True after a successful connect() and until disconnect/process exit.
  isConnected(): boolean {
    return this.connected
  }

  getServerName(): string {
    return this.serverName
  }
}
/**
* MCP Manager - Manages multiple MCP server connections
*/
export class McpManager {
private clients: Map<string, McpClient> = new Map()
private configPath: string | null = null
/**
* Load MCP config from a workspace
*/
async loadConfig(workspacePath: string): Promise<void> {
const configPath = path.join(workspacePath, ".mcp.json")
this.configPath = configPath
try {
const fs = await import("fs")
if (!fs.existsSync(configPath)) {
log.info({ path: configPath }, "No MCP config found")
return
}
const content = fs.readFileSync(configPath, "utf-8")
const config = JSON.parse(content) as { mcpServers?: Record<string, McpServerConfig> }
if (config.mcpServers) {
for (const [name, serverConfig] of Object.entries(config.mcpServers)) {
this.addServer(name, serverConfig)
}
}
log.info({ servers: Object.keys(config.mcpServers || {}) }, "Loaded MCP config")
} catch (error) {
log.error({ path: configPath, error }, "Failed to load MCP config")
}
}
/**
* Add an MCP server
*/
addServer(name: string, config: McpServerConfig): void {
if (this.clients.has(name)) {
this.clients.get(name)!.disconnect()
}
this.clients.set(name, new McpClient(name, config))
log.info({ server: name }, "Added MCP server")
}
/**
* Remove an MCP server
*/
removeServer(name: string): void {
const client = this.clients.get(name)
if (client) {
client.disconnect()
this.clients.delete(name)
}
}
/**
* Get all available tools from all connected servers
*/
async getAllTools(): Promise<Array<McpToolDefinition & { serverName: string }>> {
const allTools: Array<McpToolDefinition & { serverName: string }> = []
for (const [name, client] of this.clients) {
try {
const tools = await client.listTools()
for (const tool of tools) {
allTools.push({ ...tool, serverName: name })
}
} catch (error) {
log.warn({ server: name, error }, "Failed to get tools from MCP server")
}
}
return allTools
}
/**
* Convert MCP tools to OpenAI-compatible format
*/
async getToolsAsOpenAIFormat(): Promise<Array<{
type: "function"
function: {
name: string
description: string
parameters: McpToolDefinition["inputSchema"]
}
}>> {
const mcpTools = await this.getAllTools()
return mcpTools.map(tool => ({
type: "function" as const,
function: {
// Prefix with server name to avoid conflicts
name: `mcp_${tool.serverName}_${tool.name}`,
description: `[MCP: ${tool.serverName}] ${tool.description}`,
parameters: tool.inputSchema
}
}))
}
/**
* Execute a tool by its full name (mcp_servername_toolname)
*/
async executeTool(fullName: string, args: Record<string, unknown>): Promise<string> {
// Parse mcp_servername_toolname format
const match = fullName.match(/^mcp_([^_]+)_(.+)$/)
if (!match) {
return `Error: Invalid MCP tool name format: ${fullName}`
}
const [, serverName, toolName] = match
const client = this.clients.get(serverName)
if (!client) {
return `Error: MCP server not found: ${serverName}`
}
const result = await client.executeTool(toolName, args)
// Convert result to string
const texts = result.content
.filter(c => c.type === "text" && c.text)
.map(c => c.text!)
return texts.join("\n") || (result.isError ? "Tool execution failed" : "Tool executed successfully")
}
/**
 * Connect all configured servers.
 *
 * Each connection attempt is bounded by a 15s watchdog so one hung server
 * cannot stall startup indefinitely. Failures are recorded per server and
 * never thrown. The watchdog timer is always cleared (the original leaked
 * it, leaving a live timer per successful connection that could keep the
 * event loop alive for up to 15s).
 *
 * @returns map of server name -> { connected, error? }
 */
async connectAll(): Promise<Record<string, { connected: boolean; error?: string }>> {
  const results: Record<string, { connected: boolean; error?: string }> = {}
  for (const [name, client] of this.clients) {
    let timer: ReturnType<typeof setTimeout> | undefined
    try {
      // Add timeout for connection
      const timeoutPromise = new Promise<never>((_, reject) => {
        timer = setTimeout(() => reject(new Error("Connection timeout")), 15000)
      })
      await Promise.race([client.connect(), timeoutPromise])
      results[name] = { connected: true }
      log.info({ server: name }, "MCP server connected successfully")
    } catch (error) {
      const errorMsg = error instanceof Error ? error.message : String(error)
      log.warn({ server: name, error: errorMsg }, "Failed to connect MCP server")
      results[name] = { connected: false, error: errorMsg }
    } finally {
      // Clear the watchdog on success AND failure so it cannot fire late.
      if (timer) clearTimeout(timer)
    }
  }
  return results
}
/**
 * Disconnect every registered server and empty the registry.
 */
disconnectAll(): void {
  for (const [, client] of this.clients) {
    client.disconnect()
  }
  this.clients.clear()
}
/**
 * Get the connection status of every registered server.
 */
getStatus(): Record<string, { connected: boolean }> {
  const entries = [...this.clients].map(
    ([name, client]) => [name, { connected: client.isConnected() }] as const
  )
  return Object.fromEntries(entries)
}
}
// Singleton instance (module-scoped; discarded by resetMcpManager)
let globalMcpManager: McpManager | null = null

/** Lazily create and return the process-wide McpManager. */
export function getMcpManager(): McpManager {
  globalMcpManager ??= new McpManager()
  return globalMcpManager
}

/**
 * Disconnect all servers and drop the singleton; the next getMcpManager()
 * call creates a fresh manager.
 */
export function resetMcpManager(): void {
  if (!globalMcpManager) return
  globalMcpManager.disconnectAll()
  globalMcpManager = null
}

View File

@@ -0,0 +1,15 @@
/**
* MCP Module Index
* Exports MCP client and manager for external MCP server integration.
*/
export {
McpClient,
McpManager,
getMcpManager,
resetMcpManager,
type McpServerConfig,
type McpToolDefinition,
type McpToolCall,
type McpToolResult
} from "./client"

View File

@@ -22,7 +22,12 @@ import { registerOllamaRoutes } from "./routes/ollama"
import { registerQwenRoutes } from "./routes/qwen"
import { registerZAIRoutes } from "./routes/zai"
import { registerOpenCodeZenRoutes } from "./routes/opencode-zen"
import { registerAntigravityRoutes } from "./routes/antigravity"
import { registerSkillsRoutes } from "./routes/skills"
import { registerContextEngineRoutes } from "./routes/context-engine"
import { registerNativeSessionsRoutes } from "./routes/native-sessions"
import { registerSdkSyncRoutes } from "./routes/sdk-sync"
import { initSessionManager } from "../storage/session-store"
import { ServerMeta } from "../api-types"
import { InstanceStore } from "../storage/instance-store"
@@ -39,6 +44,7 @@ interface HttpServerDeps {
uiStaticDir: string
uiDevServerUrl?: string
logger: Logger
dataDir?: string // For session storage
}
interface HttpServerStartResult {
@@ -55,6 +61,10 @@ export function createHttpServer(deps: HttpServerDeps) {
const apiLogger = deps.logger.child({ component: "http" })
const sseLogger = deps.logger.child({ component: "sse" })
// Initialize session manager for Binary-Free Mode
const dataDir = deps.dataDir || path.join(process.cwd(), ".codenomad-data")
initSessionManager(dataDir)
const sseClients = new Set<() => void>()
const registerSseClient = (cleanup: () => void) => {
sseClients.add(cleanup)
@@ -123,7 +133,24 @@ export function createHttpServer(deps: HttpServerDeps) {
registerQwenRoutes(app, { logger: deps.logger })
registerZAIRoutes(app, { logger: deps.logger })
registerOpenCodeZenRoutes(app, { logger: deps.logger })
registerAntigravityRoutes(app, { logger: deps.logger })
registerSkillsRoutes(app)
registerContextEngineRoutes(app)
// Register Binary-Free Mode native sessions routes
registerNativeSessionsRoutes(app, {
logger: deps.logger,
workspaceManager: deps.workspaceManager,
dataDir,
eventBus: deps.eventBus,
})
// Register SDK session sync routes (for auto-migration from OpenCode to Native)
registerSdkSyncRoutes(app, {
logger: deps.logger,
dataDir,
})
registerInstanceProxyRoutes(app, { workspaceManager: deps.workspaceManager, logger: proxyLogger })

View File

@@ -0,0 +1,688 @@
import { createHash, randomBytes, randomUUID } from "crypto"
import { createServer } from "http"
import { FastifyInstance } from "fastify"
import { AntigravityClient, type ChatRequest, getDefaultAntigravityConfig, type ChatMessage } from "../../integrations/antigravity"
import { Logger } from "../../logger"
import { CORE_TOOLS, executeTools, type ToolCall, type ToolResult } from "../../tools/executor"
import { getMcpManager } from "../../mcp/client"
/** Dependencies injected by the HTTP server when registering these routes. */
interface AntigravityRouteDeps {
  logger: Logger
}
// Maximum number of tool execution loops (guards against a model that keeps
// requesting tools forever)
const MAX_TOOL_LOOPS = 10
// Google OAuth Authorization Code + PKCE configuration (Antigravity-compatible)
// NOTE(security): a fallback OAuth client id/secret are hardcoded below —
// presumably Antigravity's public installed-app credentials (TODO confirm);
// deployments should override them via the ANTIGRAVITY_GOOGLE_* env vars.
const GOOGLE_OAUTH_CONFIG = {
  clientId: process.env.ANTIGRAVITY_GOOGLE_CLIENT_ID || "1071006060591-tmhssin2h21lcre235vtolojh4g403ep.apps.googleusercontent.com",
  clientSecret: process.env.ANTIGRAVITY_GOOGLE_CLIENT_SECRET || "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",
  redirectUri: process.env.ANTIGRAVITY_GOOGLE_REDIRECT_URI || "http://localhost:51121/oauth-callback",
  authEndpoint: "https://accounts.google.com/o/oauth2/v2/auth",
  tokenEndpoint: "https://oauth2.googleapis.com/token",
  scopes: [
    "https://www.googleapis.com/auth/cloud-platform",
    "https://www.googleapis.com/auth/userinfo.email",
    "https://www.googleapis.com/auth/userinfo.profile",
    "https://www.googleapis.com/auth/cclog",
    "https://www.googleapis.com/auth/experimentsandconfigs",
  ],
}
// How long a pending browser sign-in may take before its session is dropped
const AUTH_SESSION_TTL_MS = 10 * 60 * 1000
// Poll interval (seconds) suggested to clients of /device-auth/poll
const DEFAULT_POLL_INTERVAL_SEC = 5
// Derive the loopback callback server's path/port from the redirect URI
const callbackUrl = new URL(GOOGLE_OAUTH_CONFIG.redirectUri)
const callbackPath = callbackUrl.pathname || "/oauth-callback"
const callbackPort = Number(callbackUrl.port || "0") || (callbackUrl.protocol === "https:" ? 443 : 80)
// One in-flight browser sign-in, keyed by the OAuth `state` parameter
type OAuthSession = {
  // PKCE code_verifier generated for this attempt
  verifier: string
  createdAt: number
  expiresAt: number
  // Populated by the callback server on successful token exchange
  token?: {
    accessToken: string
    refreshToken?: string
    expiresIn: number
    tokenType?: string
    scope?: string
  }
  // Populated instead of `token` when the user cancels or exchange fails
  error?: string
}
// Active OAuth sessions (in-memory, per-server instance)
const oauthSessions = new Map<string, OAuthSession>()
let oauthCallbackServer: ReturnType<typeof createServer> | null = null
/**
 * Encode a buffer as RFC 4648 §5 base64url without padding, as required for
 * PKCE code verifiers/challenges (RFC 7636).
 */
function base64UrlEncode(value: Buffer): string {
  // Node's built-in "base64url" encoding already uses -/_ and omits "="
  // padding, so the manual replace chain is unnecessary.
  return value.toString("base64url")
}
/**
 * Generate a PKCE code verifier (RFC 7636 §4.1): 32 random bytes, base64url.
 */
function createCodeVerifier(): string {
  const entropy = randomBytes(32)
  return base64UrlEncode(entropy)
}
/**
 * Derive the PKCE S256 code challenge (RFC 7636 §4.2): the base64url-encoded
 * SHA-256 digest of the verifier.
 */
function createCodeChallenge(verifier: string): string {
  const hash = createHash("sha256")
  hash.update(verifier)
  return base64UrlEncode(hash.digest())
}
/**
 * Extract the bearer token from an Authorization header.
 * Returns null when the header is missing, has a non-Bearer scheme, or
 * carries no credential.
 */
function getAccessTokenFromHeader(authorization: string | undefined): string | null {
  if (!authorization) return null
  const parts = authorization.split(" ")
  const scheme = parts[0]
  const credential = parts[1]
  if (!scheme || scheme.toLowerCase() !== "bearer" || !credential) return null
  return credential.trim()
}
/**
 * Normalize the x-antigravity-project header (which Node may surface as a
 * string or string[]) to a trimmed, non-empty project id, or undefined.
 */
function getProjectIdFromHeader(value: string | string[] | undefined): string | undefined {
  if (Array.isArray(value)) {
    // First entry with non-whitespace content wins.
    for (const item of value) {
      if (typeof item === "string") {
        const trimmed = item.trim()
        if (trimmed) return trimmed
      }
    }
    return undefined
  }
  if (typeof value === "string") {
    const trimmed = value.trim()
    if (trimmed) return trimmed
  }
  return undefined
}
/**
 * Exchange an OAuth authorization code (plus PKCE verifier) for tokens at
 * Google's token endpoint. Throws with the endpoint's error body (or a
 * status-code message) on failure.
 */
async function exchangeAuthorizationCode(code: string, verifier: string): Promise<{
  accessToken: string
  refreshToken?: string
  expiresIn: number
  tokenType?: string
  scope?: string
}> {
  // Build the x-www-form-urlencoded token request (RFC 6749 §4.1.3 + PKCE).
  const body = new URLSearchParams()
  body.set("client_id", GOOGLE_OAUTH_CONFIG.clientId)
  body.set("code", code)
  body.set("grant_type", "authorization_code")
  body.set("redirect_uri", GOOGLE_OAUTH_CONFIG.redirectUri)
  body.set("code_verifier", verifier)
  if (GOOGLE_OAUTH_CONFIG.clientSecret) {
    body.set("client_secret", GOOGLE_OAUTH_CONFIG.clientSecret)
  }
  const response = await fetch(GOOGLE_OAUTH_CONFIG.tokenEndpoint, {
    method: "POST",
    headers: {
      "Content-Type": "application/x-www-form-urlencoded",
    },
    body,
  })
  if (!response.ok) {
    const errorText = await response.text()
    throw new Error(errorText || `Token exchange failed (${response.status})`)
  }
  const payload = await response.json() as any
  return {
    accessToken: payload.access_token,
    refreshToken: payload.refresh_token,
    expiresIn: payload.expires_in,
    tokenType: payload.token_type,
    scope: payload.scope,
  }
}
/**
 * Lazily start the loopback HTTP server that receives Google's OAuth
 * redirect. Idempotent: one server per process, shared by all auth sessions.
 * The `state` query parameter routes each callback to its oauthSessions
 * entry; the handler stores the exchanged token (or error) on that entry for
 * the /device-auth/poll endpoint to pick up.
 */
function ensureOAuthCallbackServer(logger: Logger): void {
  if (oauthCallbackServer) return
  oauthCallbackServer = createServer((req, res) => {
    // createServer's callback cannot be async, so wrap in a void IIFE.
    void (async () => {
      try {
        // req.url is path-only; resolve against the redirect URI for parsing.
        const url = new URL(req.url || "", GOOGLE_OAUTH_CONFIG.redirectUri)
        if (url.pathname !== callbackPath) {
          res.writeHead(404)
          res.end()
          return
        }
        const state = url.searchParams.get("state")
        const code = url.searchParams.get("code")
        const error = url.searchParams.get("error")
        if (!state) {
          res.writeHead(400, { "Content-Type": "text/plain" })
          res.end("Missing OAuth state.")
          return
        }
        const session = oauthSessions.get(state)
        if (!session) {
          res.writeHead(404, { "Content-Type": "text/plain" })
          res.end("OAuth session not found or expired.")
          return
        }
        if (error) {
          // User cancelled / consent denied: record it and show a benign page.
          session.error = error
          res.writeHead(200, { "Content-Type": "text/html" })
          res.end("<h2>Sign-in cancelled.</h2><p>You can close this window.</p>")
          return
        }
        if (!code) {
          session.error = "Missing authorization code."
          res.writeHead(400, { "Content-Type": "text/plain" })
          res.end("Missing authorization code.")
          return
        }
        try {
          // Exchange happens here (not in poll) so the code is used exactly once.
          const token = await exchangeAuthorizationCode(code, session.verifier)
          session.token = token
          session.error = undefined
          res.writeHead(200, { "Content-Type": "text/html" })
          res.end("<h2>Sign-in complete.</h2><p>You can close this window and return to the app.</p>")
        } catch (err) {
          const message = err instanceof Error ? err.message : "OAuth callback failed."
          session.error = message
          res.writeHead(500, { "Content-Type": "text/plain" })
          res.end(message)
        }
      } catch (err) {
        const message = err instanceof Error ? err.message : "OAuth callback failed."
        res.writeHead(500, { "Content-Type": "text/plain" })
        res.end(message)
      }
    })()
  })
  oauthCallbackServer.on("error", (err) => {
    // e.g. EADDRINUSE — drop the reference so a later call can retry.
    logger.error({ err, port: callbackPort }, "Antigravity OAuth callback server failed to start")
    oauthCallbackServer = null
  })
  // Bind to loopback only; the redirect URI points at localhost.
  oauthCallbackServer.listen(callbackPort, "127.0.0.1", () => {
    logger.info({ port: callbackPort, path: callbackPath }, "Antigravity OAuth callback server listening")
  })
}
/**
 * Drop every OAuth session whose expiry time has passed.
 */
function cleanupExpiredSessions(): void {
  const cutoff = Date.now()
  const expired: string[] = []
  for (const [id, session] of oauthSessions) {
    if (session.expiresAt <= cutoff) expired.push(id)
  }
  for (const id of expired) {
    oauthSessions.delete(id)
  }
}
/**
 * Register all /api/antigravity routes: model listing, auth status, a
 * connection test, a Google OAuth Authorization Code + PKCE flow
 * (start / poll / refresh), and a chat endpoint with local tool execution.
 */
export async function registerAntigravityRoutes(
  app: FastifyInstance,
  deps: AntigravityRouteDeps
) {
  const logger = deps.logger.child({ component: "antigravity-routes" })
  // Create shared client
  const client = new AntigravityClient(getDefaultAntigravityConfig())
  // List available Antigravity models
  app.get('/api/antigravity/models', async (request, reply) => {
    try {
      const accessToken = getAccessTokenFromHeader(request.headers.authorization)
      const models = await client.getModels(accessToken ?? undefined)
      return {
        models: models.map(m => ({
          id: m.id,
          name: m.name,
          family: m.family,
          provider: "antigravity",
          free: false, // These require Google OAuth
          reasoning: m.reasoning,
          tool_call: m.tool_call,
          limit: m.limit
        }))
      }
    } catch (error) {
      logger.error({ error }, "Failed to list Antigravity models")
      return reply.status(500).send({ error: "Failed to list models" })
    }
  })
  // Check authentication status
  app.get('/api/antigravity/auth-status', async (request, reply) => {
    try {
      const accessToken = getAccessTokenFromHeader(request.headers.authorization)
      const authenticated = client.isAuthenticated(accessToken ?? undefined)
      return { authenticated }
    } catch (error) {
      logger.error({ error }, "Antigravity auth status check failed")
      return reply.status(500).send({ error: "Auth status check failed" })
    }
  })
  // Test connection
  app.get('/api/antigravity/test', async (request, reply) => {
    try {
      const accessToken = getAccessTokenFromHeader(request.headers.authorization)
      const projectId = getProjectIdFromHeader(request.headers["x-antigravity-project"])
      const result = await client.testConnection(accessToken ?? undefined, projectId)
      return result
    } catch (error) {
      logger.error({ error }, "Antigravity connection test failed")
      return reply.status(500).send({ error: "Connection test failed" })
    }
  })
  // ==========================================
  // Google OAuth Authorization Flow (PKCE)
  // ==========================================
  // Step 1: Start OAuth authorization - returns auth URL.
  // The generated sessionId doubles as the OAuth `state` parameter, which is
  // how the loopback callback server finds the matching session.
  app.post('/api/antigravity/device-auth/start', async (request, reply) => {
    try {
      logger.info("Starting Google OAuth flow for Antigravity")
      ensureOAuthCallbackServer(logger)
      const sessionId = randomUUID()
      const verifier = createCodeVerifier()
      const challenge = createCodeChallenge(verifier)
      const authUrl = new URL(GOOGLE_OAUTH_CONFIG.authEndpoint)
      authUrl.searchParams.set("client_id", GOOGLE_OAUTH_CONFIG.clientId)
      authUrl.searchParams.set("response_type", "code")
      authUrl.searchParams.set("redirect_uri", GOOGLE_OAUTH_CONFIG.redirectUri)
      authUrl.searchParams.set("scope", GOOGLE_OAUTH_CONFIG.scopes.join(" "))
      authUrl.searchParams.set("code_challenge", challenge)
      authUrl.searchParams.set("code_challenge_method", "S256")
      authUrl.searchParams.set("state", sessionId)
      // offline + consent ensures Google returns a refresh token.
      authUrl.searchParams.set("access_type", "offline")
      authUrl.searchParams.set("prompt", "consent")
      oauthSessions.set(sessionId, {
        verifier,
        createdAt: Date.now(),
        expiresAt: Date.now() + AUTH_SESSION_TTL_MS,
      })
      cleanupExpiredSessions()
      // Response mimics a device-auth shape; userCode is unused in this flow.
      return {
        sessionId,
        userCode: "",
        verificationUrl: authUrl.toString(),
        expiresIn: Math.floor(AUTH_SESSION_TTL_MS / 1000),
        interval: DEFAULT_POLL_INTERVAL_SEC,
      }
    } catch (error: any) {
      logger.error({ error: error.message, stack: error.stack }, "Failed to start OAuth authorization")
      return reply.status(500).send({
        error: "Failed to start authentication",
        details: error.message,
      })
    }
  })
  // Step 2: Poll for token (called by client after browser sign-in).
  // Sessions are single-use: both success and error responses delete them.
  app.post('/api/antigravity/device-auth/poll', async (request, reply) => {
    try {
      const { sessionId } = request.body as { sessionId: string }
      if (!sessionId) {
        return reply.status(400).send({ error: "Missing sessionId" })
      }
      cleanupExpiredSessions()
      const session = oauthSessions.get(sessionId)
      if (!session) {
        return reply.status(404).send({ error: "Session not found or expired" })
      }
      if (session.expiresAt < Date.now()) {
        oauthSessions.delete(sessionId)
        return reply.status(410).send({ error: "Session expired" })
      }
      if (session.error) {
        oauthSessions.delete(sessionId)
        return { status: "error", error: session.error }
      }
      if (!session.token) {
        return { status: "pending", interval: DEFAULT_POLL_INTERVAL_SEC }
      }
      const token = session.token
      oauthSessions.delete(sessionId)
      return {
        status: "success",
        accessToken: token.accessToken,
        refreshToken: token.refreshToken,
        expiresIn: token.expiresIn,
        tokenType: token.tokenType,
        scope: token.scope,
      }
    } catch (error) {
      logger.error({ error }, "Failed to poll for token")
      return reply.status(500).send({ error: "Failed to poll for token" })
    }
  })
  // Refresh an expired token (plain refresh_token grant, no PKCE needed)
  app.post('/api/antigravity/device-auth/refresh', async (request, reply) => {
    try {
      const { refreshToken } = request.body as { refreshToken: string }
      if (!refreshToken) {
        return reply.status(400).send({ error: "Missing refreshToken" })
      }
      const params = new URLSearchParams({
        client_id: GOOGLE_OAUTH_CONFIG.clientId,
        refresh_token: refreshToken,
        grant_type: "refresh_token",
      })
      if (GOOGLE_OAUTH_CONFIG.clientSecret) {
        params.set("client_secret", GOOGLE_OAUTH_CONFIG.clientSecret)
      }
      const response = await fetch(GOOGLE_OAUTH_CONFIG.tokenEndpoint, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/x-www-form-urlencoded'
        },
        body: params
      })
      if (!response.ok) {
        const error = await response.text()
        logger.error({ error }, "Token refresh failed")
        return reply.status(401).send({ error: "Token refresh failed" })
      }
      const data = await response.json() as any
      return {
        accessToken: data.access_token,
        expiresIn: data.expires_in,
        tokenType: data.token_type
      }
    } catch (error) {
      logger.error({ error }, "Failed to refresh token")
      return reply.status(500).send({ error: "Failed to refresh token" })
    }
  })
  // Chat completion endpoint WITH MCP TOOL SUPPORT
  app.post('/api/antigravity/chat', async (request, reply) => {
    try {
      const chatRequest = request.body as ChatRequest & {
        workspacePath?: string
        enableTools?: boolean
      }
      const accessToken = getAccessTokenFromHeader(request.headers.authorization)
      const projectId = getProjectIdFromHeader(request.headers["x-antigravity-project"])
      // Extract workspace path for tool execution
      const workspacePath = chatRequest.workspacePath || process.cwd()
      // Tools default ON; only an explicit `false` disables them.
      const enableTools = chatRequest.enableTools !== false
      logger.info({
        workspacePath,
        receivedWorkspacePath: chatRequest.workspacePath,
        enableTools,
        model: chatRequest.model
      }, "Antigravity chat request received")
      // Handle streaming with tool loop — writes SSE directly on the raw
      // socket, bypassing Fastify's serialization.
      if (chatRequest.stream) {
        reply.raw.writeHead(200, {
          'Content-Type': 'text/event-stream',
          'Cache-Control': 'no-cache',
          'Connection': 'keep-alive',
        })
        try {
          await streamWithToolLoop(
            client,
            chatRequest,
            accessToken ?? undefined,
            projectId,
            workspacePath,
            enableTools,
            reply.raw,
            logger
          )
          reply.raw.end()
        } catch (streamError) {
          // Headers are already sent, so surface the error as an SSE payload.
          logger.error({ error: streamError }, "Antigravity streaming failed")
          reply.raw.write(`data: ${JSON.stringify({ error: String(streamError) })}\n\n`)
          reply.raw.end()
        }
      } else {
        // Non-streaming with tool loop
        const response = await chatWithToolLoop(
          client,
          chatRequest,
          accessToken ?? undefined,
          projectId,
          workspacePath,
          enableTools,
          logger
        )
        return response
      }
    } catch (error) {
      logger.error({ error }, "Antigravity chat request failed")
      return reply.status(500).send({ error: "Chat request failed" })
    }
  })
  logger.info("Antigravity routes registered with Google OAuth flow!")
}
/**
 * Streaming chat with tool execution loop.
 *
 * Forwards model SSE chunks to the client while accumulating any tool-call
 * deltas. When the model requests tools, they are executed locally and the
 * results are fed back as `tool` messages before re-invoking the model — up
 * to MAX_TOOL_LOOPS rounds. Each tool result is also surfaced to the client
 * as a custom `tool_result` SSE event.
 */
async function streamWithToolLoop(
  client: AntigravityClient,
  request: ChatRequest,
  accessToken: string | undefined,
  projectId: string | undefined,
  workspacePath: string,
  enableTools: boolean,
  rawResponse: any,
  logger: Logger
): Promise<void> {
  let messages = [...request.messages]
  let loopCount = 0
  // Load MCP tools from workspace config; failure degrades to core tools only.
  let allTools = [...CORE_TOOLS]
  if (enableTools && workspacePath) {
    try {
      const mcpManager = getMcpManager()
      await mcpManager.loadConfig(workspacePath)
      const mcpTools = await mcpManager.getToolsAsOpenAIFormat()
      allTools = [...CORE_TOOLS, ...mcpTools]
      if (mcpTools.length > 0) {
        logger.info({ mcpToolCount: mcpTools.length }, "Loaded MCP tools for Antigravity")
      }
    } catch (mcpError) {
      logger.warn({ error: mcpError }, "Failed to load MCP tools")
    }
  }
  // Inject tools if enabled
  const requestWithTools: ChatRequest = {
    ...request,
    tools: enableTools ? allTools : undefined,
    tool_choice: enableTools ? "auto" : undefined
  }
  while (loopCount < MAX_TOOL_LOOPS) {
    loopCount++
    // Accumulate tool calls from stream, keyed by the delta's tool index —
    // ids, names and argument strings arrive fragmented across chunks.
    let accumulatedToolCalls: { [index: number]: { id: string; name: string; arguments: string } } = {}
    let hasToolCalls = false
    let textContent = ""
    // Stream response
    for await (const chunk of client.chatStream({ ...requestWithTools, messages }, accessToken, projectId)) {
      // Write chunk to client (pass-through, unmodified)
      rawResponse.write(`data: ${JSON.stringify(chunk)}\n\n`)
      const choice = chunk.choices[0]
      if (!choice) continue
      // Accumulate text content
      if (choice.delta?.content) {
        textContent += choice.delta.content
      }
      // Accumulate tool calls from delta (if API supports it)
      const deltaToolCalls = (choice.delta as any)?.tool_calls
      if (deltaToolCalls) {
        hasToolCalls = true
        for (const tc of deltaToolCalls) {
          const idx = tc.index ?? 0
          if (!accumulatedToolCalls[idx]) {
            accumulatedToolCalls[idx] = { id: tc.id || "", name: "", arguments: "" }
          }
          if (tc.id) accumulatedToolCalls[idx].id = tc.id
          if (tc.function?.name) accumulatedToolCalls[idx].name += tc.function.name
          if (tc.function?.arguments) accumulatedToolCalls[idx].arguments += tc.function.arguments
        }
      }
      // finish_reason "stop" means a final answer — end without a tool round.
      if (choice.finish_reason === "stop") {
        rawResponse.write('data: [DONE]\n\n')
        return
      }
    }
    // If no tool calls, we're done
    if (!hasToolCalls || !enableTools) {
      rawResponse.write('data: [DONE]\n\n')
      return
    }
    // Convert accumulated fragments into complete ToolCall objects
    const toolCalls: ToolCall[] = Object.values(accumulatedToolCalls).map(tc => ({
      id: tc.id,
      type: "function" as const,
      function: {
        name: tc.name,
        arguments: tc.arguments
      }
    }))
    if (toolCalls.length === 0) {
      rawResponse.write('data: [DONE]\n\n')
      return
    }
    logger.info({ toolCalls: toolCalls.map(tc => tc.function.name) }, "Executing tool calls")
    // Add assistant message with tool calls so the model sees its own request
    const assistantMessage: ChatMessage = {
      role: "assistant",
      content: textContent || undefined,
      tool_calls: toolCalls
    }
    messages.push(assistantMessage)
    // Execute tools
    const toolResults = await executeTools(workspacePath, toolCalls)
    // Notify client about tool execution via special event
    for (const result of toolResults) {
      const toolEvent = {
        type: "tool_result",
        tool_call_id: result.tool_call_id,
        content: result.content
      }
      rawResponse.write(`data: ${JSON.stringify(toolEvent)}\n\n`)
    }
    // Add tool results to messages for the next model round
    for (const result of toolResults) {
      const toolMessage: ChatMessage = {
        role: "tool",
        content: result.content,
        tool_call_id: result.tool_call_id
      }
      messages.push(toolMessage)
    }
    logger.info({ loopCount, toolsExecuted: toolResults.length }, "Tool loop iteration complete")
  }
  logger.warn({ loopCount }, "Max tool loops reached")
  rawResponse.write('data: [DONE]\n\n')
}
/**
* Non-streaming chat with tool execution loop
*/
async function chatWithToolLoop(
client: AntigravityClient,
request: ChatRequest,
accessToken: string | undefined,
projectId: string | undefined,
workspacePath: string,
enableTools: boolean,
logger: Logger
): Promise<any> {
let messages = [...request.messages]
let loopCount = 0
let lastResponse: any = null
// Inject tools if enabled
const requestWithTools: ChatRequest = {
...request,
tools: enableTools ? CORE_TOOLS : undefined,
tool_choice: enableTools ? "auto" : undefined
}
while (loopCount < MAX_TOOL_LOOPS) {
loopCount++
const response = await client.chat({ ...requestWithTools, messages, stream: false }, accessToken, projectId)
lastResponse = response
const choice = response.choices[0]
if (!choice) break
const toolCalls = (choice.message as any)?.tool_calls
// If no tool calls, return
if (!toolCalls || toolCalls.length === 0 || !enableTools) {
return response
}
logger.info({ toolCalls: toolCalls.map((tc: any) => tc.function.name) }, "Executing tool calls")
// Add assistant message
const assistantMessage: ChatMessage = {
role: "assistant",
content: (choice.message as any).content || undefined,
tool_calls: toolCalls
}
messages.push(assistantMessage)
// Execute tools
const toolResults = await executeTools(workspacePath, toolCalls)
// Add tool results
for (const result of toolResults) {
const toolMessage: ChatMessage = {
role: "tool",
content: result.content,
tool_call_id: result.tool_call_id
}
messages.push(toolMessage)
}
logger.info({ loopCount, toolsExecuted: toolResults.length }, "Tool loop iteration complete")
}
logger.warn({ loopCount }, "Max tool loops reached")
return lastResponse
}

View File

@@ -0,0 +1,130 @@
/**
* Context-Engine API routes
* Provides endpoints for querying the Context-Engine status and manually triggering operations.
*/
import type { FastifyInstance } from "fastify"
import { getContextEngineService } from "../../context-engine"
/**
 * Register /api/context-engine routes: status, health, manual indexing, and
 * query. All routes degrade gracefully when the service singleton has not
 * been initialized.
 */
export function registerContextEngineRoutes(app: FastifyInstance) {
  // Get Context-Engine status
  app.get("/api/context-engine/status", async (request, reply) => {
    const service = getContextEngineService()
    if (!service) {
      return reply.send({
        status: "stopped",
        message: "Context-Engine service not initialized"
      })
    }
    const status = service.getStatus()
    const client = service.getClient()
    // Get more detailed status from the engine if it's running
    let details: Record<string, unknown> = {}
    if (service.isReady()) {
      try {
        const engineStatus = await client.getStatus()
        details = {
          indexing: engineStatus.indexing,
          indexed_files: engineStatus.indexed_files,
          last_indexed: engineStatus.last_indexed
        }
      } catch {
        // Ignore errors, just don't include details
      }
    }
    return reply.send({
      status,
      ready: service.isReady(),
      ...details
    })
  })
  // Get Context-Engine health
  // NOTE(review): client.health() is not wrapped in try/catch; if it can
  // reject, the route returns a generic Fastify 500 instead of an
  // "unhealthy" payload — confirm health() never throws.
  app.get("/api/context-engine/health", async (request, reply) => {
    const service = getContextEngineService()
    if (!service) {
      return reply.send({ status: "unhealthy", reason: "Service not initialized" })
    }
    const client = service.getClient()
    const health = await client.health()
    return reply.send(health)
  })
  // Manually trigger indexing for a path (fire-and-forget)
  app.post("/api/context-engine/index", {
    schema: {
      body: {
        type: "object",
        required: ["path"],
        properties: {
          path: { type: "string" }
        }
      }
    }
  }, async (request, reply) => {
    const service = getContextEngineService()
    if (!service) {
      return reply.status(503).send({
        error: "Context-Engine service not available"
      })
    }
    const { path } = request.body as { path: string }
    // Start indexing (non-blocking)
    service.indexPath(path).catch(() => {
      // Errors are logged internally
    })
    return reply.send({
      status: "started",
      message: `Indexing started for: ${path}`
    })
  })
  // Query the Context-Engine
  app.post("/api/context-engine/query", {
    schema: {
      body: {
        type: "object",
        required: ["query"],
        properties: {
          query: { type: "string" },
          context_window: { type: "number" }
        }
      }
    }
  }, async (request, reply) => {
    const service = getContextEngineService()
    if (!service || !service.isReady()) {
      return reply.status(503).send({
        error: "Context-Engine not ready",
        results: [],
        total_results: 0
      })
    }
    const { query, context_window } = request.body as { query: string; context_window?: number }
    const client = service.getClient()
    try {
      // context_window defaults to 4096 tokens when the client omits it
      const response = await client.query(query, context_window ?? 4096)
      return reply.send(response)
    } catch (error) {
      return reply.status(500).send({
        error: error instanceof Error ? error.message : "Query failed",
        results: [],
        total_results: 0
      })
    }
  })
}

View File

@@ -1,5 +1,6 @@
import { FastifyInstance } from "fastify"
import os from "os"
import { existsSync } from "fs"
import { NetworkAddress, ServerMeta, PortAvailabilityResponse } from "../../api-types"
import { getAvailablePort } from "../../utils/port"
@@ -7,8 +8,54 @@ interface RouteDeps {
serverMeta: ServerMeta
}
/**
 * Response shape of GET /api/meta/mode — reports whether the app is running
 * in full mode (OpenCode binary found) or binary-free "lite" mode.
 */
export interface ModeInfo {
  mode: "lite" | "full"
  // true when no OpenCode binary was found on any probed path
  binaryFreeMode: boolean
  // native (binary-free) session management availability
  nativeSessions: boolean
  opencodeBinaryAvailable: boolean
  // per-provider availability flags surfaced to the UI
  providers: {
    qwen: boolean
    zai: boolean
    zen: boolean
  }
}
export function registerMetaRoutes(app: FastifyInstance, deps: RouteDeps) {
app.get("/api/meta", async () => buildMetaResponse(deps.serverMeta))
// Mode detection endpoint for Binary-Free Mode
app.get("/api/meta/mode", async (): Promise<ModeInfo> => {
// Check if any OpenCode binary is available
const opencodePaths = [
process.env.OPENCODE_PATH,
"opencode",
"opencode.exe",
].filter(Boolean) as string[]
let binaryAvailable = false
for (const p of opencodePaths) {
if (existsSync(p)) {
binaryAvailable = true
break
}
}
// In Binary-Free Mode, we use native session management
const binaryFreeMode = !binaryAvailable
return {
mode: binaryFreeMode ? "lite" : "full",
binaryFreeMode,
nativeSessions: true, // Native sessions are always available
opencodeBinaryAvailable: binaryAvailable,
providers: {
qwen: true, // Always available
zai: true, // Always available
zen: true, // Always available (needs API key)
}
}
})
app.get("/api/ports/available", async () => {
const port = await getAvailablePort(3000)
const response: PortAvailabilityResponse = { port }

View File

@@ -0,0 +1,747 @@
/**
* Native Sessions API Routes - Binary-Free Mode
*
* These routes provide session management without requiring the OpenCode binary.
* They're used when running in "Lite Mode" or when OpenCode is unavailable.
*/
import { FastifyInstance } from "fastify"
import { Logger } from "../../logger"
import { getSessionManager, Session, SessionMessage } from "../../storage/session-store"
import { CORE_TOOLS, executeTools, type ToolCall, type ToolResult } from "../../tools/executor"
import { getMcpManager } from "../../mcp/client"
import { WorkspaceManager } from "../../workspaces/manager"
import { OpenCodeZenClient, ChatMessage } from "../../integrations/opencode-zen"
import { EventBus } from "../../events/bus"
/** Dependencies injected when registering the native-sessions routes. */
interface NativeSessionsDeps {
  logger: Logger
  workspaceManager: WorkspaceManager
  // Root directory for on-disk session storage
  dataDir: string
  // Optional: session lifecycle events are published here when present
  eventBus?: EventBus
}
// Maximum tool execution loops to prevent infinite loops
const MAX_TOOL_LOOPS = 10
export function registerNativeSessionsRoutes(app: FastifyInstance, deps: NativeSessionsDeps) {
const logger = deps.logger.child({ component: "native-sessions" })
const sessionManager = getSessionManager(deps.dataDir)
// List all sessions for a workspace
app.get<{ Params: { workspaceId: string } }>("/api/native/workspaces/:workspaceId/sessions", async (request, reply) => {
try {
const sessions = await sessionManager.listSessions(request.params.workspaceId)
return { sessions }
} catch (error) {
logger.error({ error }, "Failed to list sessions")
reply.code(500)
return { error: "Failed to list sessions" }
}
})
// Create a new session
app.post<{
Params: { workspaceId: string }
Body: { title?: string; parentId?: string; model?: { providerId: string; modelId: string }; agent?: string }
}>("/api/native/workspaces/:workspaceId/sessions", async (request, reply) => {
try {
const session = await sessionManager.createSession(request.params.workspaceId, request.body)
// Emit session created event (using any for custom event type)
if (deps.eventBus) {
deps.eventBus.publish({
type: "native.session.created",
workspaceId: request.params.workspaceId,
session
} as any)
}
reply.code(201)
return { session }
} catch (error) {
logger.error({ error }, "Failed to create session")
reply.code(500)
return { error: "Failed to create session" }
}
})
// Get a specific session
app.get<{ Params: { workspaceId: string; sessionId: string } }>("/api/native/workspaces/:workspaceId/sessions/:sessionId", async (request, reply) => {
try {
const session = await sessionManager.getSession(request.params.workspaceId, request.params.sessionId)
if (!session) {
reply.code(404)
return { error: "Session not found" }
}
return { session }
} catch (error) {
logger.error({ error }, "Failed to get session")
reply.code(500)
return { error: "Failed to get session" }
}
})
// Update a session
app.patch<{
Params: { workspaceId: string; sessionId: string }
Body: Partial<Session>
}>("/api/native/workspaces/:workspaceId/sessions/:sessionId", async (request, reply) => {
try {
const session = await sessionManager.updateSession(
request.params.workspaceId,
request.params.sessionId,
request.body
)
if (!session) {
reply.code(404)
return { error: "Session not found" }
}
return { session }
} catch (error) {
logger.error({ error }, "Failed to update session")
reply.code(500)
return { error: "Failed to update session" }
}
})
// Fork a session
app.post<{
Params: { workspaceId: string; sessionId: string }
}>("/api/native/workspaces/:workspaceId/sessions/:sessionId/fork", async (request, reply) => {
try {
const session = await sessionManager.forkSession(
request.params.workspaceId,
request.params.sessionId
)
return { session }
} catch (error) {
logger.error({ error }, "Failed to fork session")
reply.code(500)
return { error: "Failed to fork session" }
}
})
// Revert a session
app.post<{
Params: { workspaceId: string; sessionId: string }
Body: { messageId?: string }
}>("/api/native/workspaces/:workspaceId/sessions/:sessionId/revert", async (request, reply) => {
try {
const session = await sessionManager.revert(
request.params.workspaceId,
request.params.sessionId,
request.body.messageId
)
return { session }
} catch (error) {
logger.error({ error }, "Failed to revert session")
reply.code(500)
return { error: "Failed to revert session" }
}
})
// Delete a session
app.delete<{ Params: { workspaceId: string; sessionId: string } }>("/api/native/workspaces/:workspaceId/sessions/:sessionId", async (request, reply) => {
try {
const deleted = await sessionManager.deleteSession(request.params.workspaceId, request.params.sessionId)
if (!deleted) {
reply.code(404)
return { error: "Session not found" }
}
reply.code(204)
return
} catch (error) {
logger.error({ error }, "Failed to delete session")
reply.code(500)
return { error: "Failed to delete session" }
}
})
// Import sessions from SDK mode - for migration when switching to native mode.
app.post<{
  Params: { workspaceId: string }
  Body: {
    sessions: Array<{
      id: string
      title?: string
      parentId?: string | null
      createdAt?: number
      updatedAt?: number
      model?: { providerId: string; modelId: string }
      agent?: string
      messages?: Array<{
        id: string
        role: "user" | "assistant" | "system" | "tool"
        content?: string
        createdAt?: number
      }>
    }>
  }
}>("/api/native/workspaces/:workspaceId/sessions/import", async (request, reply) => {
  const { workspaceId } = request.params
  try {
    const result = await sessionManager.importSessions(workspaceId, request.body.sessions)
    logger.info({ workspaceId, ...result }, "Sessions imported from SDK mode")
    return { success: true, ...result }
  } catch (error) {
    logger.error({ error }, "Failed to import sessions")
    reply.code(500)
    return { error: "Failed to import sessions" }
  }
})
// List all messages stored for a session.
app.get<{ Params: { workspaceId: string; sessionId: string } }>(
  "/api/native/workspaces/:workspaceId/sessions/:sessionId/messages",
  async (request, reply) => {
    const { workspaceId, sessionId } = request.params
    try {
      const messages = await sessionManager.getSessionMessages(workspaceId, sessionId)
      return { messages }
    } catch (error) {
      logger.error({ error }, "Failed to get messages")
      reply.code(500)
      return { error: "Failed to get messages" }
    }
  }
)
// Append messages to a session (client-side persistence).
app.post<{
  Params: { workspaceId: string; sessionId: string }
  Body: {
    messages: Array<{
      id?: string
      role: "user" | "assistant" | "system" | "tool"
      content?: string
      createdAt?: number
      updatedAt?: number
      status?: "pending" | "streaming" | "completed" | "error"
    }>
  }
}>("/api/native/workspaces/:workspaceId/sessions/:sessionId/messages", async (request, reply) => {
  const { workspaceId, sessionId } = request.params
  const incoming = request.body?.messages
  if (!Array.isArray(incoming)) {
    reply.code(400)
    return { error: "messages array is required" }
  }
  try {
    const stored: SessionMessage[] = []
    for (const msg of incoming) {
      // Silently skip malformed entries instead of failing the whole batch.
      if (!msg || typeof msg.role !== "string") {
        continue
      }
      stored.push(
        await sessionManager.addMessage(workspaceId, sessionId, {
          id: msg.id,
          role: msg.role,
          content: msg.content,
          createdAt: msg.createdAt,
          updatedAt: msg.updatedAt,
          status: msg.status,
        })
      )
    }
    return { messages: stored }
  } catch (error) {
    logger.error({ error }, "Failed to append messages")
    reply.code(500)
    return { error: "Failed to append messages" }
  }
})
// Add a message (user prompt) and stream the assistant response as SSE.
app.post<{
  Params: { workspaceId: string; sessionId: string }
  Body: {
    content: string
    provider: "qwen" | "zai" | "zen"
    model?: string
    accessToken?: string
    resourceUrl?: string
    enableTools?: boolean
    systemPrompt?: string
  }
}>("/api/native/workspaces/:workspaceId/sessions/:sessionId/prompt", async (request, reply) => {
  const { workspaceId, sessionId } = request.params
  const { content, provider, model, accessToken, resourceUrl, enableTools = true, systemPrompt } = request.body
  try {
    // Persist the user's message first so it is part of the stored history
    // even if streaming fails afterwards.
    await sessionManager.addMessage(workspaceId, sessionId, {
      role: "user",
      content,
      status: "completed",
    })
    // Resolve the directory used for tool execution.
    const workspace = deps.workspaceManager.get(workspaceId)
    const workspacePath = workspace?.path ?? process.cwd()
    // Rebuild the full conversation to send to the model.
    const allMessages = await sessionManager.getSessionMessages(workspaceId, sessionId)
    const chatMessages: ChatMessage[] = []
    if (systemPrompt) {
      chatMessages.push({ role: "system", content: systemPrompt })
    }
    for (const m of allMessages) {
      if (m.role === "user" || m.role === "assistant" || m.role === "system") {
        chatMessages.push({ role: m.role, content: m.content ?? "" })
      }
    }
    // Core tools plus any MCP tools configured for this workspace.
    let allTools = [...CORE_TOOLS]
    if (enableTools) {
      try {
        const mcpManager = getMcpManager()
        await mcpManager.loadConfig(workspacePath)
        const mcpTools = await mcpManager.getToolsAsOpenAIFormat()
        allTools = [...CORE_TOOLS, ...mcpTools]
      } catch (mcpError) {
        // MCP loading is best-effort; fall back to core tools only.
        logger.warn({ error: mcpError }, "Failed to load MCP tools")
      }
    }
    // From here on we bypass Fastify's reply and write SSE to the raw socket.
    reply.raw.writeHead(200, {
      'Content-Type': 'text/event-stream',
      'Cache-Control': 'no-cache',
      'Connection': 'keep-alive',
      'X-Accel-Buffering': 'no',
    })
    // Placeholder assistant message, updated when streaming completes.
    const assistantMessage = await sessionManager.addMessage(workspaceId, sessionId, {
      role: "assistant",
      content: "",
      status: "streaming",
    })
    let fullContent = ""
    try {
      // Route to the appropriate provider.
      fullContent = await streamWithProvider({
        provider,
        model,
        accessToken,
        resourceUrl,
        messages: chatMessages,
        tools: enableTools ? allTools : [],
        workspacePath,
        rawResponse: reply.raw,
        logger,
      })
    } catch (streamError) {
      logger.error({ error: streamError }, "Stream error")
      reply.raw.write(`data: ${JSON.stringify({ error: String(streamError) })}\n\n`)
    }
    // Finalize the assistant message with whatever was streamed.
    await sessionManager.updateMessage(workspaceId, assistantMessage.id, {
      content: fullContent,
      status: "completed",
    })
    // Emit message event (cast: custom event type not in the bus union).
    if (deps.eventBus) {
      deps.eventBus.publish({
        type: "native.message.completed",
        workspaceId,
        sessionId,
        messageId: assistantMessage.id,
      } as any)
    }
    reply.raw.write('data: [DONE]\n\n')
    reply.raw.end()
  } catch (error) {
    logger.error({ error }, "Failed to process prompt")
    // Fix: `reply.sent` stays false when the handler writes through
    // `reply.raw`, so the old `!reply.sent` guard could attempt a second
    // response after SSE headers were already sent. Check the raw response
    // instead (same pattern as the Qwen chat route).
    if (reply.raw.headersSent) {
      reply.raw.write(`data: ${JSON.stringify({ error: String(error) })}\n\n`)
      reply.raw.end()
      return
    }
    reply.code(500)
    return { error: "Failed to process prompt" }
  }
})
// SSE endpoint for session events: sends a ping immediately and then every
// 30s so idle intermediaries do not close the connection.
app.get<{ Params: { workspaceId: string } }>("/api/native/workspaces/:workspaceId/events", async (request, reply) => {
  reply.raw.writeHead(200, {
    'Content-Type': 'text/event-stream',
    'Cache-Control': 'no-cache',
    'Connection': 'keep-alive',
    'X-Accel-Buffering': 'no',
  })
  const sendPing = () => {
    reply.raw.write(`data: ${JSON.stringify({ type: "ping" })}\n\n`)
  }
  sendPing()
  const keepAlive = setInterval(sendPing, 30000)
  // Stop pinging once the client disconnects.
  request.raw.on("close", () => clearInterval(keepAlive))
})
logger.info("Native sessions routes registered (Binary-Free Mode)")
}
/**
 * Stream a chat completion from the selected provider, executing any
 * requested tool calls and feeding the results back to the model until it
 * stops asking for tools or MAX_TOOL_LOOPS iterations have run.
 *
 * Returns the concatenated assistant text produced across all iterations.
 */
async function streamWithProvider(opts: {
  provider: "qwen" | "zai" | "zen"
  model?: string
  accessToken?: string
  resourceUrl?: string
  messages: ChatMessage[]
  tools: any[]
  workspacePath: string
  rawResponse: any
  logger: Logger
}): Promise<string> {
  const { provider, model, accessToken, resourceUrl, messages, tools, workspacePath, rawResponse, logger } = opts
  let fullContent = ""
  let loopCount = 0
  // Working copy: grows with assistant/tool turns, never reassigned.
  const currentMessages = [...messages]
  // Tool execution loop
  while (loopCount < MAX_TOOL_LOOPS) {
    loopCount++
    let responseContent = ""
    let toolCalls: ToolCall[] = []
    // Each case is braced so its `const` is scoped to that case only
    // (fixes no-case-declarations: lexical bindings leaking switch-wide).
    switch (provider) {
      case "zen": {
        const result = await streamWithZen(model, currentMessages, tools, rawResponse, logger)
        responseContent = result.content
        toolCalls = result.toolCalls
        break
      }
      case "qwen": {
        const result = await streamWithQwen(accessToken, resourceUrl, model, currentMessages, tools, rawResponse, logger)
        responseContent = result.content
        toolCalls = result.toolCalls
        break
      }
      case "zai": {
        const result = await streamWithZAI(accessToken, model, currentMessages, tools, rawResponse, logger)
        responseContent = result.content
        toolCalls = result.toolCalls
        break
      }
      default: {
        // Exhaustiveness guard: `provider` is a closed union, so this is
        // unreachable unless a caller bypasses the type system.
        const unreachable: never = provider
        throw new Error(`Unknown provider: ${String(unreachable)}`)
      }
    }
    fullContent += responseContent
    // No tool calls means the model produced its final answer.
    if (toolCalls.length === 0) {
      break
    }
    logger.info({ toolCount: toolCalls.length }, "Executing tool calls")
    // Record the assistant turn that requested the tools...
    currentMessages.push({
      role: "assistant",
      content: responseContent,
      tool_calls: toolCalls.map(tc => ({
        id: tc.id,
        type: "function" as const,
        function: tc.function
      }))
    })
    // ...then execute each tool and append its result as a `tool` message.
    const toolResults = await executeTools(workspacePath, toolCalls)
    for (let i = 0; i < toolCalls.length; i++) {
      const tc = toolCalls[i]
      const result = toolResults[i]
      // Notify the client about tool activity (truncated preview only).
      rawResponse.write(`data: ${JSON.stringify({
        type: "tool_execution",
        tool: tc.function.name,
        result: result?.content?.substring(0, 200)
      })}\n\n`)
      currentMessages.push({
        role: "tool",
        content: result?.content ?? "Tool execution failed",
        tool_call_id: tc.id
      })
    }
  }
  return fullContent
}
/**
 * Stream one completion from OpenCode Zen (free models).
 *
 * Forwards text deltas to the client as SSE and assembles any streamed
 * tool calls. Fix: tool-call deltas arrive fragmented (id/name in the
 * first chunk, `arguments` appended across later chunks), so they are
 * accumulated by index — matching the Qwen/Z.AI loops in this file —
 * instead of being pushed once per delta with whatever fragment was
 * present (which dropped argument fragments and could yield undefined ids).
 */
async function streamWithZen(
  model: string | undefined,
  messages: ChatMessage[],
  tools: any[],
  rawResponse: any,
  logger: Logger
): Promise<{ content: string; toolCalls: ToolCall[] }> {
  const zenClient = new OpenCodeZenClient()
  let content = ""
  // Partial tool calls keyed by their stream index.
  const pending: Record<number, { id: string; name: string; arguments: string }> = {}
  try {
    const stream = zenClient.chatStream({
      model: model ?? "gpt-5-nano",
      messages,
      stream: true,
      tools: tools.length > 0 ? tools : undefined,
      tool_choice: tools.length > 0 ? "auto" : undefined,
    })
    for await (const chunk of stream) {
      const delta = chunk.choices?.[0]?.delta
      if (delta?.content) {
        content += delta.content
        rawResponse.write(`data: ${JSON.stringify({ choices: [{ delta: { content: delta.content } }] })}\n\n`)
      }
      // Handle tool calls (if the model supports them); cast: delta type
      // does not declare tool_calls.
      const deltaToolCalls = (delta as any)?.tool_calls
      if (deltaToolCalls) {
        for (const tc of deltaToolCalls) {
          const idx = tc.index ?? 0
          if (!pending[idx]) {
            pending[idx] = { id: tc.id ?? `call_${Date.now()}_${idx}`, name: "", arguments: "" }
          }
          if (tc.id) pending[idx].id = tc.id
          if (tc.function?.name) pending[idx].name += tc.function.name
          if (tc.function?.arguments) pending[idx].arguments += tc.function.arguments
        }
      }
    }
  } catch (error) {
    logger.error({ error }, "Zen streaming error")
    throw error
  }
  // Materialize only fully-named calls; default empty arguments to "{}".
  const toolCalls: ToolCall[] = Object.values(pending)
    .filter(tc => tc.name)
    .map(tc => ({
      id: tc.id,
      type: "function" as const,
      function: { name: tc.name, arguments: tc.arguments || "{}" },
    }))
  return { content, toolCalls }
}
/**
 * Stream one completion from the Qwen OpenAI-compatible API.
 *
 * Forwards text deltas to the client as SSE and assembles streamed tool
 * calls. Fix: tool-call deltas are accumulated by index (name first,
 * `arguments` fragments appended across chunks) — the same scheme used by
 * the /api/qwen/chat tool loop — instead of pushing one ToolCall per
 * delta, which previously discarded argument fragments arriving after the
 * name and produced calls with empty/partial arguments.
 *
 * @throws Error when no access token is supplied or the API responds non-2xx.
 */
async function streamWithQwen(
  accessToken: string | undefined,
  resourceUrl: string | undefined,
  model: string | undefined,
  messages: ChatMessage[],
  tools: any[],
  rawResponse: any,
  logger: Logger
): Promise<{ content: string; toolCalls: ToolCall[] }> {
  if (!accessToken) {
    throw new Error("Qwen access token required. Please authenticate with Qwen first.")
  }
  const baseUrl = resourceUrl ?? "https://chat.qwen.ai"
  let content = ""
  // Partial tool calls keyed by their stream index.
  const pending: Record<number, { id: string; name: string; arguments: string }> = {}
  try {
    const response = await fetch(`${baseUrl}/api/v1/chat/completions`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
        "Authorization": `Bearer ${accessToken}`,
      },
      body: JSON.stringify({
        model: model ?? "qwen-plus-latest",
        messages,
        stream: true,
        tools: tools.length > 0 ? tools : undefined,
        tool_choice: tools.length > 0 ? "auto" : undefined,
      })
    })
    if (!response.ok) {
      const error = await response.text()
      throw new Error(`Qwen API error: ${response.status} - ${error}`)
    }
    const reader = response.body?.getReader()
    if (!reader) throw new Error("No response body")
    const decoder = new TextDecoder()
    // SSE lines can split across network chunks; keep the remainder.
    let buffer = ""
    while (true) {
      const { done, value } = await reader.read()
      if (done) break
      buffer += decoder.decode(value, { stream: true })
      const lines = buffer.split("\n")
      buffer = lines.pop() ?? ""
      for (const line of lines) {
        if (!line.startsWith("data: ")) continue
        const data = line.slice(6)
        if (data === "[DONE]") continue
        try {
          const parsed = JSON.parse(data)
          const delta = parsed.choices?.[0]?.delta
          if (delta?.content) {
            content += delta.content
            rawResponse.write(`data: ${JSON.stringify({ choices: [{ delta: { content: delta.content } }] })}\n\n`)
          }
          if (delta?.tool_calls) {
            for (const tc of delta.tool_calls) {
              const idx = tc.index ?? 0
              if (!pending[idx]) {
                pending[idx] = { id: tc.id ?? `call_${Date.now()}_${idx}`, name: "", arguments: "" }
              }
              if (tc.id) pending[idx].id = tc.id
              if (tc.function?.name) pending[idx].name += tc.function.name
              if (tc.function?.arguments) pending[idx].arguments += tc.function.arguments
            }
          }
        } catch {
          // Skip invalid JSON (keep-alives / partial frames)
        }
      }
    }
  } catch (error) {
    logger.error({ error }, "Qwen streaming error")
    throw error
  }
  // Materialize only fully-named calls; default empty arguments to "{}".
  const toolCalls: ToolCall[] = Object.values(pending)
    .filter(tc => tc.name)
    .map(tc => ({
      id: tc.id,
      type: "function" as const,
      function: { name: tc.name, arguments: tc.arguments || "{}" },
    }))
  return { content, toolCalls }
}
/**
 * Stream one completion from the Z.AI API.
 *
 * Forwards text deltas to the client as SSE and assembles streamed tool
 * calls. Fix: tool-call deltas are accumulated by index (name first,
 * `arguments` fragments appended across chunks) — consistent with the
 * Qwen tool loop in this file — instead of pushing one ToolCall per
 * delta, which previously dropped argument fragments that arrived after
 * the name.
 *
 * @throws Error when no API key is supplied or the API responds non-2xx.
 */
async function streamWithZAI(
  accessToken: string | undefined,
  model: string | undefined,
  messages: ChatMessage[],
  tools: any[],
  rawResponse: any,
  logger: Logger
): Promise<{ content: string; toolCalls: ToolCall[] }> {
  let content = ""
  // Partial tool calls keyed by their stream index.
  const pending: Record<number, { id: string; name: string; arguments: string }> = {}
  const baseUrl = "https://api.z.ai/api"
  try {
    if (!accessToken) {
      throw new Error("Z.AI API key required. Please authenticate with Z.AI first.")
    }
    const headers: Record<string, string> = {
      "Content-Type": "application/json",
      "Authorization": `Bearer ${accessToken}`,
    }
    const response = await fetch(`${baseUrl}/paas/v4/chat/completions`, {
      method: "POST",
      headers,
      body: JSON.stringify({
        model: model ?? "glm-4.7",
        messages,
        stream: true,
        tools: tools.length > 0 ? tools : undefined,
        tool_choice: tools.length > 0 ? "auto" : undefined,
      })
    })
    if (!response.ok) {
      const error = await response.text()
      throw new Error(`Z.AI API error: ${response.status} - ${error}`)
    }
    const reader = response.body?.getReader()
    if (!reader) throw new Error("No response body")
    const decoder = new TextDecoder()
    // SSE lines can split across network chunks; keep the remainder.
    let buffer = ""
    while (true) {
      const { done, value } = await reader.read()
      if (done) break
      buffer += decoder.decode(value, { stream: true })
      const lines = buffer.split("\n")
      buffer = lines.pop() ?? ""
      for (const line of lines) {
        if (!line.startsWith("data: ")) continue
        const data = line.slice(6)
        if (data === "[DONE]") continue
        try {
          const parsed = JSON.parse(data)
          const delta = parsed.choices?.[0]?.delta
          if (delta?.content) {
            content += delta.content
            rawResponse.write(`data: ${JSON.stringify({ choices: [{ delta: { content: delta.content } }] })}\n\n`)
          }
          if (delta?.tool_calls) {
            for (const tc of delta.tool_calls) {
              const idx = tc.index ?? 0
              if (!pending[idx]) {
                pending[idx] = { id: tc.id ?? `call_${Date.now()}_${idx}`, name: "", arguments: "" }
              }
              if (tc.id) pending[idx].id = tc.id
              if (tc.function?.name) pending[idx].name += tc.function.name
              if (tc.function?.arguments) pending[idx].arguments += tc.function.arguments
            }
          }
        } catch {
          // Skip invalid JSON (keep-alives / partial frames)
        }
      }
    }
  } catch (error) {
    logger.error({ error }, "Z.AI streaming error")
    throw error
  }
  // Materialize only fully-named calls; default empty arguments to "{}".
  const toolCalls: ToolCall[] = Object.values(pending)
    .filter(tc => tc.name)
    .map(tc => ({
      id: tc.id,
      type: "function" as const,
      function: { name: tc.name, arguments: tc.arguments || "{}" },
    }))
  return { content, toolCalls }
}

View File

@@ -9,10 +9,13 @@ import {
import { Logger } from "../../logger"
import fs from "fs"
import path from "path"
import { getUserIntegrationsDir } from "../../user-data"
import { getUserIntegrationsDir, getUserIdFromRequest } from "../../user-context"
const CONFIG_DIR = getUserIntegrationsDir()
const CONFIG_FILE = path.join(CONFIG_DIR, "ollama-config.json")
// Helper to get config file path for a user
function getConfigFile(userId?: string | null): string {
const configDir = getUserIntegrationsDir(userId || undefined)
return path.join(configDir, "ollama-config.json")
}
interface OllamaRouteDeps {
logger: Logger
@@ -26,7 +29,8 @@ export async function registerOllamaRoutes(
app.get('/api/ollama/config', async (request, reply) => {
try {
const config = getOllamaConfig()
const userId = getUserIdFromRequest(request)
const config = getOllamaConfig(userId)
return { config: { ...config, apiKey: config.apiKey ? '***' : undefined } }
} catch (error) {
logger.error({ error }, "Failed to get Ollama config")
@@ -48,9 +52,10 @@ export async function registerOllamaRoutes(
}
}, async (request, reply) => {
try {
const userId = getUserIdFromRequest(request)
const { enabled, apiKey, endpoint } = request.body as any
updateOllamaConfig({ enabled, apiKey, endpoint })
logger.info("Ollama Cloud configuration updated")
updateOllamaConfig({ enabled, apiKey, endpoint }, userId)
logger.info({ userId }, "Ollama Cloud configuration updated for user")
return { success: true, config: { enabled, endpoint, apiKey: apiKey ? '***' : undefined } }
} catch (error) {
logger.error({ error }, "Failed to update Ollama config")
@@ -60,7 +65,8 @@ export async function registerOllamaRoutes(
app.post('/api/ollama/test', async (request, reply) => {
try {
const config = getOllamaConfig()
const userId = getUserIdFromRequest(request)
const config = getOllamaConfig(userId)
if (!config.enabled) {
return reply.status(400).send({ error: "Ollama Cloud is not enabled" })
}
@@ -556,24 +562,27 @@ export async function registerOllamaRoutes(
logger.info("Ollama Cloud routes registered")
}
function getOllamaConfig(): OllamaCloudConfig {
function getOllamaConfig(userId?: string | null): OllamaCloudConfig {
const configFile = getConfigFile(userId)
try {
if (!fs.existsSync(CONFIG_FILE)) {
if (!fs.existsSync(configFile)) {
return { enabled: false, endpoint: "https://ollama.com" }
}
const data = fs.readFileSync(CONFIG_FILE, 'utf-8')
const data = fs.readFileSync(configFile, 'utf-8')
return JSON.parse(data)
} catch {
return { enabled: false, endpoint: "https://ollama.com" }
}
}
function updateOllamaConfig(config: Partial<OllamaCloudConfig>): void {
function updateOllamaConfig(config: Partial<OllamaCloudConfig>, userId?: string | null): void {
const configFile = getConfigFile(userId)
const configDir = getUserIntegrationsDir(userId || undefined)
try {
if (!fs.existsSync(CONFIG_DIR)) {
fs.mkdirSync(CONFIG_DIR, { recursive: true })
if (!fs.existsSync(configDir)) {
fs.mkdirSync(configDir, { recursive: true })
}
const current = getOllamaConfig()
const current = getOllamaConfig(userId)
// Only update apiKey if a new non-empty value is provided
const updated = {
@@ -583,8 +592,8 @@ function updateOllamaConfig(config: Partial<OllamaCloudConfig>): void {
apiKey: config.apiKey || current.apiKey
}
fs.writeFileSync(CONFIG_FILE, JSON.stringify(updated, null, 2))
console.log(`[Ollama] Config saved: enabled=${updated.enabled}, endpoint=${updated.endpoint}, hasApiKey=${!!updated.apiKey}`)
fs.writeFileSync(configFile, JSON.stringify(updated, null, 2))
console.log(`[Ollama] Config saved for user ${userId || "default"}: enabled=${updated.enabled}, endpoint=${updated.endpoint}, hasApiKey=${!!updated.apiKey}`)
} catch (error) {
console.error("Failed to save Ollama config:", error)
}

View File

@@ -1,11 +1,16 @@
import { FastifyInstance } from "fastify"
import { OpenCodeZenClient, type ChatRequest, getDefaultZenConfig } from "../../integrations/opencode-zen"
import { OpenCodeZenClient, type ChatRequest, getDefaultZenConfig, type ChatMessage } from "../../integrations/opencode-zen"
import { Logger } from "../../logger"
import { CORE_TOOLS, executeTools, type ToolCall, type ToolResult } from "../../tools/executor"
import { getMcpManager } from "../../mcp/client"
interface OpenCodeZenRouteDeps {
logger: Logger
}
// Maximum number of tool execution loops
const MAX_TOOL_LOOPS = 10
export async function registerOpenCodeZenRoutes(
app: FastifyInstance,
deps: OpenCodeZenRouteDeps
@@ -49,12 +54,25 @@ export async function registerOpenCodeZenRoutes(
}
})
// Chat completion endpoint
// Chat completion endpoint WITH MCP TOOL SUPPORT
app.post('/api/opencode-zen/chat', async (request, reply) => {
try {
const chatRequest = request.body as ChatRequest
const chatRequest = request.body as ChatRequest & {
workspacePath?: string
enableTools?: boolean
}
// Handle streaming
// Extract workspace path for tool execution
const workspacePath = chatRequest.workspacePath || process.cwd()
const enableTools = chatRequest.enableTools !== false
logger.info({
workspacePath,
receivedWorkspacePath: chatRequest.workspacePath,
enableTools
}, "OpenCode Zen chat request received")
// Handle streaming with tool loop
if (chatRequest.stream) {
reply.raw.writeHead(200, {
'Content-Type': 'text/event-stream',
@@ -63,16 +81,14 @@ export async function registerOpenCodeZenRoutes(
})
try {
for await (const chunk of client.chatStream(chatRequest)) {
reply.raw.write(`data: ${JSON.stringify(chunk)}\n\n`)
// Check for finish
if (chunk.choices?.[0]?.finish_reason) {
reply.raw.write('data: [DONE]\n\n')
break
}
}
await streamWithToolLoop(
client,
chatRequest,
workspacePath,
enableTools,
reply.raw,
logger
)
reply.raw.end()
} catch (streamError) {
logger.error({ error: streamError }, "OpenCode Zen streaming failed")
@@ -80,7 +96,14 @@ export async function registerOpenCodeZenRoutes(
reply.raw.end()
}
} else {
const response = await client.chat(chatRequest)
// Non-streaming with tool loop
const response = await chatWithToolLoop(
client,
chatRequest,
workspacePath,
enableTools,
logger
)
return response
}
} catch (error) {
@@ -89,5 +112,213 @@ export async function registerOpenCodeZenRoutes(
}
})
logger.info("OpenCode Zen routes registered - Free models available!")
logger.info("OpenCode Zen routes registered with MCP tool support - Free models available!")
}
/**
* Streaming chat with tool execution loop
*/
async function streamWithToolLoop(
client: OpenCodeZenClient,
request: ChatRequest,
workspacePath: string,
enableTools: boolean,
rawResponse: any,
logger: Logger
): Promise<void> {
let messages = [...request.messages]
let loopCount = 0
// Load MCP tools from workspace config
let allTools = [...CORE_TOOLS]
if (enableTools && workspacePath) {
try {
const mcpManager = getMcpManager()
await mcpManager.loadConfig(workspacePath)
const mcpTools = await mcpManager.getToolsAsOpenAIFormat()
allTools = [...CORE_TOOLS, ...mcpTools]
if (mcpTools.length > 0) {
logger.info({ mcpToolCount: mcpTools.length }, "Loaded MCP tools for OpenCode Zen")
}
} catch (mcpError) {
logger.warn({ error: mcpError }, "Failed to load MCP tools")
}
}
// Inject tools if enabled
const requestWithTools: ChatRequest = {
...request,
tools: enableTools ? allTools : undefined,
tool_choice: enableTools ? "auto" : undefined
}
while (loopCount < MAX_TOOL_LOOPS) {
loopCount++
// Accumulate tool calls from stream
let accumulatedToolCalls: { [index: number]: { id: string; name: string; arguments: string } } = {}
let hasToolCalls = false
let textContent = ""
// Stream response
for await (const chunk of client.chatStream({ ...requestWithTools, messages })) {
// Write chunk to client
rawResponse.write(`data: ${JSON.stringify(chunk)}\n\n`)
const choice = chunk.choices[0]
if (!choice) continue
// Accumulate text content
if (choice.delta?.content) {
textContent += choice.delta.content
}
// Accumulate tool calls from delta (if API supports it)
const deltaToolCalls = (choice.delta as any)?.tool_calls
if (deltaToolCalls) {
hasToolCalls = true
for (const tc of deltaToolCalls) {
const idx = tc.index ?? 0
if (!accumulatedToolCalls[idx]) {
accumulatedToolCalls[idx] = { id: tc.id || "", name: "", arguments: "" }
}
if (tc.id) accumulatedToolCalls[idx].id = tc.id
if (tc.function?.name) accumulatedToolCalls[idx].name += tc.function.name
if (tc.function?.arguments) accumulatedToolCalls[idx].arguments += tc.function.arguments
}
}
// Check if we should stop
if (choice.finish_reason === "stop") {
rawResponse.write('data: [DONE]\n\n')
return
}
}
// If no tool calls, we're done
if (!hasToolCalls || !enableTools) {
rawResponse.write('data: [DONE]\n\n')
return
}
// Convert accumulated tool calls
const toolCalls: ToolCall[] = Object.values(accumulatedToolCalls).map(tc => ({
id: tc.id,
type: "function" as const,
function: {
name: tc.name,
arguments: tc.arguments
}
}))
if (toolCalls.length === 0) {
rawResponse.write('data: [DONE]\n\n')
return
}
logger.info({ toolCalls: toolCalls.map(tc => tc.function.name) }, "Executing tool calls")
// Add assistant message with tool calls
const assistantMessage: ChatMessage = {
role: "assistant",
content: textContent || undefined,
tool_calls: toolCalls
}
messages.push(assistantMessage)
// Execute tools
const toolResults = await executeTools(workspacePath, toolCalls)
// Notify client about tool execution via special event
for (const result of toolResults) {
const toolEvent = {
type: "tool_result",
tool_call_id: result.tool_call_id,
content: result.content
}
rawResponse.write(`data: ${JSON.stringify(toolEvent)}\n\n`)
}
// Add tool results to messages
for (const result of toolResults) {
const toolMessage: ChatMessage = {
role: "tool",
content: result.content,
tool_call_id: result.tool_call_id
}
messages.push(toolMessage)
}
logger.info({ loopCount, toolsExecuted: toolResults.length }, "Tool loop iteration complete")
}
logger.warn({ loopCount }, "Max tool loops reached")
rawResponse.write('data: [DONE]\n\n')
}
/**
* Non-streaming chat with tool execution loop
*/
async function chatWithToolLoop(
client: OpenCodeZenClient,
request: ChatRequest,
workspacePath: string,
enableTools: boolean,
logger: Logger
): Promise<any> {
let messages = [...request.messages]
let loopCount = 0
let lastResponse: any = null
// Inject tools if enabled
const requestWithTools: ChatRequest = {
...request,
tools: enableTools ? CORE_TOOLS : undefined,
tool_choice: enableTools ? "auto" : undefined
}
while (loopCount < MAX_TOOL_LOOPS) {
loopCount++
const response = await client.chat({ ...requestWithTools, messages, stream: false })
lastResponse = response
const choice = response.choices[0]
if (!choice) break
const toolCalls = (choice.message as any)?.tool_calls
// If no tool calls, return
if (!toolCalls || toolCalls.length === 0 || !enableTools) {
return response
}
logger.info({ toolCalls: toolCalls.map((tc: any) => tc.function.name) }, "Executing tool calls")
// Add assistant message
const assistantMessage: ChatMessage = {
role: "assistant",
content: (choice.message as any).content || undefined,
tool_calls: toolCalls
}
messages.push(assistantMessage)
// Execute tools
const toolResults = await executeTools(workspacePath, toolCalls)
// Add tool results
for (const result of toolResults) {
const toolMessage: ChatMessage = {
role: "tool",
content: result.content,
tool_call_id: result.tool_call_id
}
messages.push(toolMessage)
}
logger.info({ loopCount, toolsExecuted: toolResults.length }, "Tool loop iteration complete")
}
logger.warn({ loopCount }, "Max tool loops reached")
return lastResponse
}

View File

@@ -1,10 +1,16 @@
import { FastifyInstance, FastifyReply } from "fastify"
import { join } from "path"
import { existsSync, mkdirSync } from "fs"
import { Logger } from "../../logger"
import { CORE_TOOLS, executeTools, type ToolCall, type ToolResult } from "../../tools/executor"
import { getMcpManager } from "../../mcp/client"
interface QwenRouteDeps {
logger: Logger
}
const MAX_TOOL_LOOPS = 10
const QWEN_OAUTH_BASE_URL = 'https://chat.qwen.ai'
const QWEN_OAUTH_DEVICE_CODE_ENDPOINT = `${QWEN_OAUTH_BASE_URL}/api/v1/oauth2/device/code`
const QWEN_OAUTH_TOKEN_ENDPOINT = `${QWEN_OAUTH_BASE_URL}/api/v1/oauth2/token`
@@ -17,6 +23,8 @@ function normalizeQwenModel(model?: string): string {
const raw = (model || "").trim()
if (!raw) return "coder-model"
const lower = raw.toLowerCase()
if (lower.startsWith("qwen-")) return lower
if (lower.includes("qwen")) return lower
if (lower === "vision-model" || lower.includes("vision")) return "vision-model"
if (lower === "coder-model") return "coder-model"
if (lower.includes("coder")) return "coder-model"
@@ -197,7 +205,159 @@ export async function registerQwenRoutes(
}
})
// Qwen Chat API - proxy chat requests to Qwen using OAuth token
/**
* Streaming chat with tool execution loop for Qwen
*/
async function streamWithToolLoop(
accessToken: string,
chatUrl: string,
initialRequest: any,
workspacePath: string,
enableTools: boolean,
rawResponse: any,
logger: Logger
) {
let messages = [...initialRequest.messages]
let loopCount = 0
const model = initialRequest.model
while (loopCount < MAX_TOOL_LOOPS) {
loopCount++
logger.info({ loopCount, model }, "Starting Qwen tool loop iteration")
const response = await fetch(chatUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${accessToken}`,
'Accept': 'text/event-stream'
},
body: JSON.stringify({
...initialRequest,
messages,
stream: true,
tools: enableTools ? initialRequest.tools : undefined,
tool_choice: enableTools ? "auto" : undefined
})
})
if (!response.ok) {
const errorText = await response.text()
throw new Error(`Qwen API error (${response.status}): ${errorText}`)
}
if (!response.body) throw new Error("No response body")
const reader = response.body.getReader()
const decoder = new TextDecoder()
let textContent = ""
let hasToolCalls = false
let accumulatedToolCalls: Record<number, { id: string, name: string, arguments: string }> = {}
let buffer = ""
while (true) {
const { done, value } = await reader.read()
if (done) break
buffer += decoder.decode(value, { stream: true })
const lines = buffer.split("\n")
buffer = lines.pop() || ""
for (const line of lines) {
const trimmed = line.trim()
if (!trimmed.startsWith("data: ")) continue
const data = trimmed.slice(6).trim()
if (data === "[DONE]") {
if (!hasToolCalls) {
rawResponse.write('data: [DONE]\n\n')
return
}
break
}
let chunk: any
try {
chunk = JSON.parse(data)
} catch (e) {
continue
}
const choice = chunk.choices?.[0]
if (!choice) continue
// Pass through text content to client
if (choice.delta?.content) {
textContent += choice.delta.content
rawResponse.write(`data: ${JSON.stringify(chunk)}\n\n`)
}
// Accumulate tool calls
if (choice.delta?.tool_calls) {
hasToolCalls = true
for (const tc of choice.delta.tool_calls) {
const idx = tc.index ?? 0
if (!accumulatedToolCalls[idx]) {
accumulatedToolCalls[idx] = { id: tc.id || "", name: "", arguments: "" }
}
if (tc.id) accumulatedToolCalls[idx].id = tc.id
if (tc.function?.name) accumulatedToolCalls[idx].name += tc.function.name
if (tc.function?.arguments) accumulatedToolCalls[idx].arguments += tc.function.arguments
}
}
if (choice.finish_reason === "tool_calls") {
break
}
if (choice.finish_reason === "stop" && !hasToolCalls) {
rawResponse.write('data: [DONE]\n\n')
return
}
}
}
// If no tool calls, we're done
if (!hasToolCalls || !enableTools) {
rawResponse.write('data: [DONE]\n\n')
return
}
// Execute tools
const toolCalls: ToolCall[] = Object.values(accumulatedToolCalls).map(tc => ({
id: tc.id,
type: "function" as const,
function: { name: tc.name, arguments: tc.arguments }
}))
logger.info({ toolCalls: toolCalls.map(tc => tc.function.name) }, "Executing Qwen tool calls")
messages.push({
role: "assistant",
content: textContent || undefined,
tool_calls: toolCalls
})
const toolResults = await executeTools(workspacePath, toolCalls)
// Notify frontend
for (const result of toolResults) {
const toolEvent = {
type: "tool_result",
tool_call_id: result.tool_call_id,
content: result.content
}
rawResponse.write(`data: ${JSON.stringify(toolEvent)}\n\n`)
messages.push({
role: "tool",
content: result.content,
tool_call_id: result.tool_call_id
})
}
}
rawResponse.write('data: [DONE]\n\n')
}
// Qwen Chat API - with tool support
app.post('/api/qwen/chat', {
schema: {
body: {
@@ -207,7 +367,9 @@ export async function registerQwenRoutes(
model: { type: 'string' },
messages: { type: 'array' },
stream: { type: 'boolean' },
resource_url: { type: 'string' }
resource_url: { type: 'string' },
workspacePath: { type: 'string' },
enableTools: { type: 'boolean' }
}
}
}
@@ -219,63 +381,76 @@ export async function registerQwenRoutes(
}
const accessToken = authHeader.substring(7)
const { model, messages, stream, resource_url } = request.body as any
const { model, messages, stream, resource_url, workspacePath, enableTools } = request.body as any
// Use resource_url from OAuth credentials to target the DashScope-compatible API
const apiBaseUrl = normalizeQwenResourceUrl(resource_url)
const normalizedModel = normalizeQwenModel(model)
const chatUrl = `${apiBaseUrl}/chat/completions`
logger.info({ chatUrl, model: normalizedModel, messageCount: messages?.length }, "Proxying Qwen chat request")
// MCP Tool Loading
let allTools = [...CORE_TOOLS]
const effectiveWorkspacePath = workspacePath || process.cwd()
const toolsEnabled = enableTools !== false
const response = await fetch(chatUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${accessToken}`,
'Accept': stream ? 'text/event-stream' : 'application/json'
},
body: JSON.stringify({
model: normalizedModel,
messages,
stream: stream || false
})
})
if (!response.ok) {
const errorText = await response.text()
logger.error({ status: response.status, errorText }, "Qwen chat request failed")
return reply.status(response.status).send({ error: "Chat request failed", details: errorText })
if (toolsEnabled && effectiveWorkspacePath) {
try {
const mcpManager = getMcpManager()
await mcpManager.loadConfig(effectiveWorkspacePath)
const mcpTools = await mcpManager.getToolsAsOpenAIFormat()
allTools = [...CORE_TOOLS, ...mcpTools]
} catch (mcpError) {
logger.warn({ error: mcpError }, "Failed to load MCP tools for Qwen")
}
}
if (stream && response.body) {
// Stream the response
logger.info({ chatUrl, model: normalizedModel, tools: allTools.length }, "Proxying Qwen chat with tools")
if (stream) {
reply.raw.writeHead(200, {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
})
const reader = response.body.getReader()
const decoder = new TextDecoder()
try {
while (true) {
const { done, value } = await reader.read()
if (done) break
const chunk = decoder.decode(value, { stream: true })
reply.raw.write(chunk)
}
} finally {
reader.releaseLock()
await streamWithToolLoop(
accessToken,
chatUrl,
{ model: normalizedModel, messages, tools: allTools },
effectiveWorkspacePath,
toolsEnabled,
reply.raw,
logger
)
reply.raw.end()
} catch (streamError) {
logger.error({ error: streamError }, "Qwen streaming failed")
reply.raw.write(`data: ${JSON.stringify({ error: String(streamError) })}\n\n`)
reply.raw.end()
}
} else {
const response = await fetch(chatUrl, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Authorization': `Bearer ${accessToken}`
},
body: JSON.stringify({
model: normalizedModel,
messages,
stream: false
})
})
const data = await response.json()
return reply.send(data)
}
} catch (error) {
logger.error({ error }, "Qwen chat proxy failed")
if (reply.raw.headersSent) {
reply.raw.write(`data: ${JSON.stringify({ error: String(error) })}\n\n`)
reply.raw.end()
return
}
return reply.status(500).send({ error: "Chat request failed" })
}
})

View File

@@ -0,0 +1,243 @@
/**
* SDK Session Sync - Reads sessions from OpenCode's storage and syncs to Native mode
*
* OpenCode stores sessions in:
* - Windows: %USERPROFILE%\.local\share\opencode\storage\session\{projectId}\
* - Linux/Mac: ~/.local/share/opencode/storage/session/{projectId}/
*
* The projectId is a hash of the workspace folder path.
*/
import { FastifyInstance } from "fastify"
import { readdir, readFile, appendFile } from "fs/promises"
import { existsSync } from "fs"
import { join } from "path"
import { homedir } from "os"
import { Logger } from "../../logger"
import { getSessionManager } from "../../storage/session-store"
interface SdkSyncRouteDeps {
logger: Logger
dataDir: string
}
interface OpenCodeSession {
id: string
version: string
projectID: string
directory: string
title: string
parentID?: string
time: {
created: number
updated: number
}
summary?: {
additions: number
deletions: number
files: number
}
share?: {
url: string
version: number
}
revert?: {
messageID: string
reason?: string
}
}
/**
* Get the OpenCode storage directory
*/
/**
 * Resolve the root of OpenCode's on-disk storage.
 *
 * The layout is the same on every platform:
 *   Windows:   %USERPROFILE%\.local\share\opencode\storage
 *   Linux/Mac: ~/.local/share/opencode/storage
 */
function getOpenCodeStorageDir(): string {
  const segments = [".local", "share", "opencode", "storage"]
  return join(homedir(), ...segments)
}
/**
* Read all sessions for a project from OpenCode's storage
*/
/**
 * Read every OpenCode session belonging to `folderPath`.
 *
 * OpenCode keys sessions by an opaque projectId directory; each `ses_*.json`
 * file inside records the workspace `directory` it belongs to. We inspect the
 * first session file of each project directory to decide whether that project
 * matches the requested folder (sessions within one project share a
 * directory), then read the remaining session files of the matching project.
 *
 * Fix: the previous version also appended a verbose trace to
 * `sdk-sync-debug.log` in process.cwd() on every call — leftover debug
 * instrumentation that littered the server's working directory and duplicated
 * the structured logger. All diagnostics now go through `logger` only.
 *
 * @param folderPath absolute workspace folder to look up
 * @param logger     structured logger for diagnostics
 * @returns matching sessions, or an empty array when the storage directory is
 *          missing, unreadable, or no project matches
 */
async function readOpenCodeSessions(folderPath: string, logger: Logger): Promise<OpenCodeSession[]> {
  const storageDir = getOpenCodeStorageDir()
  const sessionBaseDir = join(storageDir, "session")
  // Normalize both sides for comparison: forward slashes, lower case, trimmed.
  const targetPath = folderPath.replace(/\\/g, '/').toLowerCase().trim()
  logger.info({ folderPath, targetPath, sessionBaseDir }, "Starting SDK session search")
  if (!existsSync(sessionBaseDir)) {
    logger.info({ sessionBaseDir }, "OpenCode session base directory not found")
    return []
  }
  try {
    const projectDirs = await readdir(sessionBaseDir, { withFileTypes: true })
    const dirs = projectDirs.filter(d => d.isDirectory()).map(d => d.name)
    logger.info({ count: dirs.length }, "Scanning project directories")
    for (const projectId of dirs) {
      const sessionDir = join(sessionBaseDir, projectId)
      try {
        const files = await readdir(sessionDir)
        const firstSessionFile = files.find(f => f.startsWith("ses_") && f.endsWith(".json"))
        if (!firstSessionFile) continue
        const content = await readFile(join(sessionDir, firstSessionFile), "utf-8")
        const sessionData = JSON.parse(content) as OpenCodeSession
        if (!sessionData.directory) {
          logger.info({ projectId, firstSessionFile }, "Session file missing directory field")
          continue
        }
        const sessionPath = sessionData.directory.replace(/\\/g, '/').toLowerCase().trim()
        if (sessionPath !== targetPath) continue
        logger.info({ projectId, sessionPath }, "MATCH FOUND!")
        // This is the correct project directory — read all of its sessions.
        const sessions: OpenCodeSession[] = [sessionData]
        const otherFiles = files.filter(f => f !== firstSessionFile && f.startsWith("ses_") && f.endsWith(".json"))
        for (const file of otherFiles) {
          try {
            const fileContent = await readFile(join(sessionDir, file), "utf-8")
            sessions.push(JSON.parse(fileContent) as OpenCodeSession)
          } catch (e) {
            // A single corrupt file should not abort the whole import.
            logger.warn({ file, error: e }, "Failed to read session file")
          }
        }
        logger.info({ count: sessions.length }, "Read sessions count")
        return sessions
      } catch (e) {
        logger.info({ projectId, error: String(e) }, "Error scanning project directory")
      }
    }
  } catch (error) {
    logger.info({ error: String(error) }, "Failed to scan OpenCode sessions directory")
  }
  logger.info({}, "No sessions found after scan")
  return []
}
/**
 * Register HTTP routes that bridge OpenCode ("SDK") sessions into NomadArch's
 * native session store.
 *
 * Two endpoints:
 *  - POST /api/native/workspaces/:workspaceId/sync-sdk  — import sessions
 *  - POST /api/native/check-sdk-sessions                — probe for sessions
 *
 * Only session metadata is imported; message bodies, model and agent info are
 * not present in OpenCode's session files (see mapping comment below).
 */
export function registerSdkSyncRoutes(app: FastifyInstance, deps: SdkSyncRouteDeps) {
  const logger = deps.logger.child({ component: "sdk-sync" })
  const sessionManager = getSessionManager(deps.dataDir)

  /**
   * Sync sessions from SDK (OpenCode) to Native mode
   * This reads sessions directly from OpenCode's storage directory
   */
  app.post<{
    Params: { workspaceId: string }
    Body: { folderPath: string }
  }>("/api/native/workspaces/:workspaceId/sync-sdk", async (request, reply) => {
    const { workspaceId } = request.params
    const { folderPath } = request.body
    if (!folderPath) {
      return reply.status(400).send({ error: "Missing folderPath" })
    }
    logger.info({ workspaceId, folderPath }, "Starting SDK session sync")
    try {
      // Read sessions from OpenCode's storage
      const sdkSessions = await readOpenCodeSessions(folderPath, logger)
      if (sdkSessions.length === 0) {
        // Not an error: the folder simply has no OpenCode history.
        return {
          success: true,
          imported: 0,
          skipped: 0,
          message: "No SDK sessions found for this folder"
        }
      }
      // Convert OpenCode sessions to our format
      const sessionsToImport = sdkSessions.map(s => ({
        id: s.id,
        title: s.title,
        parentId: s.parentID || null,
        createdAt: s.time.created,
        updatedAt: s.time.updated,
        // We don't have model/agent info in the SDK session format
        // Those are stored in OpenCode's config, not session
      }))
      // Import to native session store
      const result = await sessionManager.importSessions(workspaceId, sessionsToImport)
      logger.info({ workspaceId, ...result }, "SDK session sync completed")
      return {
        success: true,
        imported: result.imported,
        skipped: result.skipped,
        total: sdkSessions.length
      }
    } catch (error) {
      logger.error({ error }, "SDK session sync failed")
      return reply.status(500).send({
        error: "Failed to sync SDK sessions",
        details: error instanceof Error ? error.message : String(error)
      })
    }
  })

  /**
   * Check if OpenCode sessions exist for a folder
   */
  app.post<{
    Body: { folderPath: string }
  }>("/api/native/check-sdk-sessions", async (request, reply) => {
    const { folderPath } = request.body
    if (!folderPath) {
      return reply.status(400).send({ error: "Missing folderPath" })
    }
    try {
      const sdkSessions = await readOpenCodeSessions(folderPath, logger)
      // Lightweight summary so the UI can decide whether to offer an import.
      return {
        found: sdkSessions.length > 0,
        count: sdkSessions.length,
        sessions: sdkSessions.map(s => ({
          id: s.id,
          title: s.title,
          created: s.time.created
        }))
      }
    } catch (error) {
      // Probe endpoint: report "nothing found" rather than failing with a 500.
      logger.error({ error }, "Failed to check SDK sessions")
      return { found: false, count: 0, sessions: [] }
    }
  })

  logger.info("SDK sync routes registered")
}

View File

@@ -3,7 +3,7 @@ import { z } from "zod"
import { InstanceStore } from "../../storage/instance-store"
import { EventBus } from "../../events/bus"
import { ModelPreferenceSchema } from "../../config/schema"
import type { InstanceData, Task, SessionTasks } from "../../api-types"
import type { InstanceData } from "../../api-types"
import { WorkspaceManager } from "../../workspaces/manager"
interface RouteDeps {
@@ -18,12 +18,28 @@ const TaskSchema = z.object({
status: z.enum(["completed", "interrupted", "in-progress", "pending"]),
timestamp: z.number(),
messageIds: z.array(z.string()).optional(),
taskSessionId: z.string().optional(),
archived: z.boolean().optional(),
})
const InstanceDataSchema = z.object({
messageHistory: z.array(z.string()).default([]),
agentModelSelections: z.record(z.string(), ModelPreferenceSchema).default({}),
sessionTasks: z.record(z.string(), z.array(TaskSchema)).optional(),
sessionMessages: z
.record(
z.string(),
z.array(
z.object({
id: z.string(),
role: z.enum(["user", "assistant", "system", "tool"]),
content: z.string().optional(),
createdAt: z.number().optional(),
updatedAt: z.number().optional(),
}),
),
)
.optional(),
sessionSkills: z
.record(
z.string(),
@@ -45,6 +61,7 @@ const EMPTY_INSTANCE_DATA: InstanceData = {
messageHistory: [],
agentModelSelections: {},
sessionTasks: {},
sessionMessages: {},
sessionSkills: {},
customAgents: [],
}

View File

@@ -2,7 +2,7 @@ import { FastifyInstance, FastifyReply } from "fastify"
import { spawnSync } from "child_process"
import { z } from "zod"
import { existsSync, mkdirSync } from "fs"
import { cp, readFile, writeFile } from "fs/promises"
import { cp, readFile, writeFile, stat as readFileStat } from "fs/promises"
import path from "path"
import { WorkspaceManager } from "../../workspaces/manager"
import { InstanceStore } from "../../storage/instance-store"
@@ -257,6 +257,12 @@ export function registerWorkspaceRoutes(app: FastifyInstance, deps: RouteDeps) {
const configPath = path.join(workspace.path, ".mcp.json")
try {
await writeFile(configPath, JSON.stringify(body.config, null, 2), "utf-8")
// Auto-load MCP config into the manager after saving
const { getMcpManager } = await import("../../mcp/client")
const mcpManager = getMcpManager()
await mcpManager.loadConfig(workspace.path)
return { path: configPath, exists: true, config: body.config }
} catch (error) {
request.log.error({ err: error }, "Failed to write MCP config")
@@ -265,6 +271,119 @@ export function registerWorkspaceRoutes(app: FastifyInstance, deps: RouteDeps) {
}
})
// Get MCP connection status for a workspace
app.get<{ Params: { id: string } }>("/api/workspaces/:id/mcp-status", async (request, reply) => {
  const workspace = deps.workspaceManager.get(request.params.id)
  if (!workspace) {
    reply.code(404)
    return { error: "Workspace not found" }
  }
  try {
    // Lazy import keeps MCP machinery out of the server's startup path.
    const { getMcpManager } = await import("../../mcp/client")
    const mcpManager = getMcpManager()
    // Load config if not already loaded
    await mcpManager.loadConfig(workspace.path)
    const status = mcpManager.getStatus()
    const tools = await mcpManager.getAllTools()
    // Summarize tools for the UI; full input schemas are not needed here.
    return {
      servers: status,
      toolCount: tools.length,
      tools: tools.map(t => ({ name: t.name, server: t.serverName, description: t.description }))
    }
  } catch (error) {
    request.log.error({ err: error }, "Failed to get MCP status")
    reply.code(500)
    return { error: "Failed to get MCP status" }
  }
})
// Connect all configured MCPs for a workspace
app.post<{ Params: { id: string } }>("/api/workspaces/:id/mcp-connect", async (request, reply) => {
  const workspace = deps.workspaceManager.get(request.params.id)
  if (!workspace) {
    reply.code(404)
    return { error: "Workspace not found" }
  }
  try {
    const { getMcpManager } = await import("../../mcp/client")
    const manager = getMcpManager()
    // Pick up the latest .mcp.json, then explicitly connect every server.
    await manager.loadConfig(workspace.path)
    const connectionResults = await manager.connectAll()
    // Tools become available only from servers that actually connected.
    const tools = await manager.getAllTools()
    // Reduce the detailed connection results to a simple per-server flag.
    const status: Record<string, { connected: boolean }> = Object.fromEntries(
      Object.entries(connectionResults).map(([name, result]) => [name, { connected: result.connected }])
    )
    return {
      success: true,
      servers: status,
      toolCount: tools.length,
      connectionDetails: connectionResults
    }
  } catch (error) {
    request.log.error({ err: error }, "Failed to connect MCPs")
    reply.code(500)
    return { error: "Failed to connect MCPs" }
  }
})
app.post<{
  Params: { id: string }
  Body: { name: string; description?: string; systemPrompt: string; mode?: string }
}>("/api/workspaces/:id/agents", async (request, reply) => {
  const workspace = deps.workspaceManager.get(request.params.id)
  if (!workspace) {
    reply.code(404)
    return { error: "Workspace not found" }
  }
  const { name, description, systemPrompt } = request.body
  if (!name || !systemPrompt) {
    reply.code(400)
    return { error: "Name and systemPrompt are required" }
  }
  try {
    const data = await deps.instanceStore.read(workspace.path)
    const customAgents = data.customAgents || []
    const agentData = { name, description, prompt: systemPrompt }
    // Upsert keyed by agent name: replace an existing entry or append.
    const existingIndex = customAgents.findIndex(a => a.name === name)
    if (existingIndex === -1) {
      customAgents.push(agentData)
    } else {
      customAgents[existingIndex] = agentData
    }
    await deps.instanceStore.write(workspace.path, { ...data, customAgents })
    return { success: true, agent: agentData }
  } catch (error) {
    request.log.error({ err: error }, "Failed to save custom agent")
    reply.code(500)
    return { error: "Failed to save custom agent" }
  }
})
app.post<{
Body: { source: string; destination: string; includeConfig?: boolean }
}>("/api/workspaces/import", async (request, reply) => {
@@ -308,6 +427,53 @@ export function registerWorkspaceRoutes(app: FastifyInstance, deps: RouteDeps) {
return workspace
})
// Serve static files from workspace for preview
app.get<{ Params: { id: string; "*": string } }>("/api/workspaces/:id/serve/*", async (request, reply) => {
  const workspace = deps.workspaceManager.get(request.params.id)
  if (!workspace) {
    reply.code(404)
    return { error: "Workspace not found" }
  }
  const relativePath = request.params["*"]
  // Resolve both sides to absolute, normalized paths before comparing.
  const workspaceRoot = path.resolve(workspace.path)
  const filePath = path.resolve(workspaceRoot, relativePath)
  // Security check: the resolved target must stay inside the workspace root.
  // A plain startsWith() prefix test is insufficient — it accepts sibling
  // directories that merely share a prefix (e.g. "/ws-evil" vs "/ws").
  // path.relative() escapes the root iff it yields ".." segments or an
  // absolute path (different drive on Windows).
  const rel = path.relative(workspaceRoot, filePath)
  if (rel === ".." || rel.startsWith(".." + path.sep) || path.isAbsolute(rel)) {
    reply.code(403)
    return { error: "Access denied" }
  }
  if (!existsSync(filePath)) {
    reply.code(404)
    return { error: "File not found" }
  }
  const stat = await readFileStat(filePath)
  if (!stat.isFile()) {
    reply.code(400)
    return { error: "Not a file" }
  }
  // Minimal extension → MIME map for preview purposes.
  const ext = path.extname(filePath).toLowerCase()
  const mimeTypes: Record<string, string> = {
    ".html": "text/html",
    ".htm": "text/html",
    ".js": "application/javascript",
    ".css": "text/css",
    ".json": "application/json",
    ".png": "image/png",
    ".jpg": "image/jpeg",
    ".jpeg": "image/jpeg",
    ".gif": "image/gif",
    ".svg": "image/svg+xml",
    ".txt": "text/plain",
  }
  reply.type(mimeTypes[ext] || "application/octet-stream")
  return await readFile(filePath)
})
}

View File

@@ -1,16 +1,24 @@
import { FastifyInstance } from "fastify"
import { ZAIClient, ZAI_MODELS, type ZAIConfig, type ZAIChatRequest, ZAIChatRequestSchema } from "../../integrations/zai-api"
import { FastifyInstance, FastifyRequest } from "fastify"
import { ZAIClient, ZAI_MODELS, type ZAIConfig, type ZAIChatRequest, type ZAIMessage } from "../../integrations/zai-api"
import { Logger } from "../../logger"
import { existsSync, readFileSync, writeFileSync, mkdirSync } from "fs"
import { join } from "path"
import { getUserIntegrationsDir } from "../../user-data"
import { getUserIntegrationsDir, getUserIdFromRequest } from "../../user-context"
import { CORE_TOOLS, executeTools, type ToolCall, type ToolResult } from "../../tools/executor"
import { getMcpManager } from "../../mcp/client"
interface ZAIRouteDeps {
logger: Logger
}
const CONFIG_DIR = getUserIntegrationsDir()
const CONFIG_FILE = join(CONFIG_DIR, "zai-config.json")
// Maximum number of tool execution loops to prevent infinite recursion
const MAX_TOOL_LOOPS = 10
// Helper to get config file path for a user
function getConfigFile(userId?: string | null): string {
const configDir = getUserIntegrationsDir(userId || undefined)
return join(configDir, "zai-config.json")
}
export async function registerZAIRoutes(
app: FastifyInstance,
@@ -18,15 +26,12 @@ export async function registerZAIRoutes(
) {
const logger = deps.logger.child({ component: "zai-routes" })
// Ensure config directory exists
if (!existsSync(CONFIG_DIR)) {
mkdirSync(CONFIG_DIR, { recursive: true })
}
// Get Z.AI configuration
// Get Z.AI configuration (per-user)
app.get('/api/zai/config', async (request, reply) => {
try {
const config = getZAIConfig()
const userId = getUserIdFromRequest(request)
const config = getZAIConfig(userId)
logger.debug({ userId }, "Getting Z.AI config for user")
return { config: { ...config, apiKey: config.apiKey ? '***' : undefined } }
} catch (error) {
logger.error({ error }, "Failed to get Z.AI config")
@@ -34,12 +39,13 @@ export async function registerZAIRoutes(
}
})
// Update Z.AI configuration
// Update Z.AI configuration (per-user)
app.post('/api/zai/config', async (request, reply) => {
try {
const userId = getUserIdFromRequest(request)
const { enabled, apiKey, endpoint } = request.body as Partial<ZAIConfig>
updateZAIConfig({ enabled, apiKey, endpoint })
logger.info("Z.AI configuration updated")
updateZAIConfig({ enabled, apiKey, endpoint }, userId)
logger.info({ userId }, "Z.AI configuration updated for user")
return { success: true, config: { enabled, endpoint, apiKey: apiKey ? '***' : undefined } }
} catch (error) {
logger.error({ error }, "Failed to update Z.AI config")
@@ -47,10 +53,11 @@ export async function registerZAIRoutes(
}
})
// Test Z.AI connection
// Test Z.AI connection (per-user)
app.post('/api/zai/test', async (request, reply) => {
try {
const config = getZAIConfig()
const userId = getUserIdFromRequest(request)
const config = getZAIConfig(userId)
if (!config.enabled) {
return reply.status(400).send({ error: "Z.AI is not enabled" })
}
@@ -75,18 +82,56 @@ export async function registerZAIRoutes(
}
})
// Chat completion endpoint
// Chat completion endpoint WITH MCP TOOL SUPPORT (per-user)
app.post('/api/zai/chat', async (request, reply) => {
try {
const config = getZAIConfig()
const userId = getUserIdFromRequest(request)
const config = getZAIConfig(userId)
if (!config.enabled) {
return reply.status(400).send({ error: "Z.AI is not enabled" })
}
const client = new ZAIClient(config)
const chatRequest = request.body as ZAIChatRequest
const chatRequest = request.body as ZAIChatRequest & {
workspacePath?: string
enableTools?: boolean
}
// Handle streaming
// Extract workspace path for tool execution
// IMPORTANT: workspacePath must be provided by frontend, otherwise tools write to server directory
const workspacePath = chatRequest.workspacePath || process.cwd()
const enableTools = chatRequest.enableTools !== false // Default to true
logger.info({
workspacePath,
receivedWorkspacePath: chatRequest.workspacePath,
enableTools
}, "Z.AI chat request received")
// Load MCP tools from workspace config
let allTools = [...CORE_TOOLS]
if (enableTools && workspacePath) {
try {
const mcpManager = getMcpManager()
await mcpManager.loadConfig(workspacePath)
const mcpTools = await mcpManager.getToolsAsOpenAIFormat()
allTools = [...CORE_TOOLS, ...mcpTools]
if (mcpTools.length > 0) {
logger.info({ mcpToolCount: mcpTools.length }, "Loaded MCP tools")
}
} catch (mcpError) {
logger.warn({ error: mcpError }, "Failed to load MCP tools, using core tools only")
}
}
// Inject tools into request if enabled
const requestWithTools: ZAIChatRequest = {
...chatRequest,
tools: enableTools ? allTools : undefined,
tool_choice: enableTools ? "auto" : undefined
}
// Handle streaming with tool execution loop
if (chatRequest.stream) {
reply.raw.writeHead(200, {
'Content-Type': 'text/event-stream',
@@ -95,17 +140,14 @@ export async function registerZAIRoutes(
})
try {
for await (const chunk of client.chatStream(chatRequest)) {
reply.raw.write(`data: ${JSON.stringify(chunk)}\n\n`)
// Check for finish_reason to end stream
const finishReason = chunk.choices[0]?.finish_reason
if (finishReason) {
reply.raw.write('data: [DONE]\n\n')
break
}
}
await streamWithToolLoop(
client,
requestWithTools,
workspacePath,
enableTools,
reply.raw,
logger
)
reply.raw.end()
} catch (streamError) {
logger.error({ error: streamError }, "Z.AI streaming failed")
@@ -113,7 +155,14 @@ export async function registerZAIRoutes(
reply.raw.end()
}
} else {
const response = await client.chat(chatRequest)
// Non-streaming with tool loop
const response = await chatWithToolLoop(
client,
requestWithTools,
workspacePath,
enableTools,
logger
)
return response
}
} catch (error) {
@@ -122,23 +171,228 @@ export async function registerZAIRoutes(
}
})
logger.info("Z.AI routes registered")
logger.info("Z.AI routes registered with MCP tool support")
}
function getZAIConfig(): ZAIConfig {
/**
* Streaming chat with tool execution loop
*/
/**
 * Streaming chat with tool execution loop.
 *
 * Streams model output to the client as SSE while accumulating any tool-call
 * deltas. When the stream finishes with pending tool calls, the tools are
 * executed against `workspacePath`, their results are appended to the
 * conversation, and a fresh stream is started — up to MAX_TOOL_LOOPS rounds.
 *
 * @param client        Z.AI API client
 * @param request       base chat request (messages are re-sent each round)
 * @param workspacePath directory tools operate in
 * @param enableTools   when false, tool-call deltas are streamed but never executed
 * @param rawResponse   Node response already set up for text/event-stream
 * @param logger        structured logger
 */
async function streamWithToolLoop(
  client: ZAIClient,
  request: ZAIChatRequest,
  workspacePath: string,
  enableTools: boolean,
  rawResponse: any,
  logger: Logger
): Promise<void> {
  let messages = [...request.messages]
  let loopCount = 0
  while (loopCount < MAX_TOOL_LOOPS) {
    loopCount++
    // Accumulate tool calls from stream.
    // Tool calls arrive as partial deltas keyed by index; name/arguments
    // fragments are concatenated as they stream in (OpenAI-style deltas —
    // assumes the provider always supplies a stable `index`; TODO confirm).
    let accumulatedToolCalls: { [index: number]: { id: string; name: string; arguments: string } } = {}
    let hasToolCalls = false
    let textContent = ""
    // Stream response
    for await (const chunk of client.chatStream({ ...request, messages })) {
      // Forward every raw chunk to the client unchanged.
      rawResponse.write(`data: ${JSON.stringify(chunk)}\n\n`)
      const choice = chunk.choices[0]
      if (!choice) continue
      // Accumulate text content (kept so the assistant message we record
      // alongside the tool calls carries the text it produced).
      if (choice.delta?.content) {
        textContent += choice.delta.content
      }
      // Accumulate tool calls from delta
      if (choice.delta?.tool_calls) {
        hasToolCalls = true
        for (const tc of choice.delta.tool_calls) {
          const idx = tc.index ?? 0
          if (!accumulatedToolCalls[idx]) {
            accumulatedToolCalls[idx] = { id: tc.id || "", name: "", arguments: "" }
          }
          if (tc.id) accumulatedToolCalls[idx].id = tc.id
          if (tc.function?.name) accumulatedToolCalls[idx].name += tc.function.name
          if (tc.function?.arguments) accumulatedToolCalls[idx].arguments += tc.function.arguments
        }
      }
      // finish_reason "stop" means a plain answer: terminate immediately,
      // even mid-iteration — no tool round follows.
      if (choice.finish_reason === "stop") {
        rawResponse.write('data: [DONE]\n\n')
        return
      }
    }
    // If no tool calls (or tools disabled), we're done.
    if (!hasToolCalls || !enableTools) {
      rawResponse.write('data: [DONE]\n\n')
      return
    }
    // Convert accumulated deltas into complete ToolCall objects.
    const toolCalls: ToolCall[] = Object.values(accumulatedToolCalls).map(tc => ({
      id: tc.id,
      type: "function" as const,
      function: {
        name: tc.name,
        arguments: tc.arguments
      }
    }))
    if (toolCalls.length === 0) {
      rawResponse.write('data: [DONE]\n\n')
      return
    }
    logger.info({ toolCalls: toolCalls.map(tc => tc.function.name) }, "Executing tool calls")
    // Record the assistant turn (text + tool calls) before executing,
    // as the follow-up request must contain it for the tool results to bind.
    const assistantMessage: ZAIMessage = {
      role: "assistant",
      content: textContent || undefined,
      tool_calls: toolCalls
    }
    messages.push(assistantMessage)
    // Execute tools
    const toolResults = await executeTools(workspacePath, toolCalls)
    // Notify client about tool execution via special event
    // (a non-standard "tool_result" SSE payload the frontend understands).
    for (const result of toolResults) {
      const toolEvent = {
        type: "tool_result",
        tool_call_id: result.tool_call_id,
        content: result.content
      }
      rawResponse.write(`data: ${JSON.stringify(toolEvent)}\n\n`)
    }
    // Add tool results to messages for the next model round.
    for (const result of toolResults) {
      const toolMessage: ZAIMessage = {
        role: "tool",
        content: result.content,
        tool_call_id: result.tool_call_id
      }
      messages.push(toolMessage)
    }
    logger.info({ loopCount, toolsExecuted: toolResults.length }, "Tool loop iteration complete")
  }
  // Safety valve: model kept requesting tools past the cap.
  logger.warn({ loopCount }, "Max tool loops reached")
  rawResponse.write('data: [DONE]\n\n')
}
/**
* Non-streaming chat with tool execution loop
*/
/**
 * Non-streaming chat with tool execution loop.
 *
 * Calls the model repeatedly; whenever a reply carries tool calls they are
 * executed against `workspacePath` and their results appended to the
 * conversation, until the model answers without tool calls or the
 * MAX_TOOL_LOOPS cap is hit (then the last raw response is returned).
 */
async function chatWithToolLoop(
  client: ZAIClient,
  request: ZAIChatRequest,
  workspacePath: string,
  enableTools: boolean,
  logger: Logger
): Promise<any> {
  const conversation = [...request.messages]
  let loopCount = 0
  let lastResponse: any = null
  for (;;) {
    if (loopCount >= MAX_TOOL_LOOPS) break
    loopCount++
    const response = await client.chat({ ...request, messages: conversation, stream: false })
    lastResponse = response
    const choice = response.choices[0]
    if (!choice) break
    const toolCalls = choice.message?.tool_calls
    // A reply without tool calls (or tools disabled) is the final answer.
    if (!toolCalls || toolCalls.length === 0 || !enableTools) {
      return response
    }
    logger.info({ toolCalls: toolCalls.map((tc: any) => tc.function.name) }, "Executing tool calls")
    // Record the assistant turn so the tool results can bind to it.
    const assistantTurn: ZAIMessage = {
      role: "assistant",
      content: choice.message.content || undefined,
      tool_calls: toolCalls
    }
    conversation.push(assistantTurn)
    const toolResults = await executeTools(workspacePath, toolCalls)
    for (const result of toolResults) {
      const toolTurn: ZAIMessage = {
        role: "tool",
        content: result.content,
        tool_call_id: result.tool_call_id
      }
      conversation.push(toolTurn)
    }
    logger.info({ loopCount, toolsExecuted: toolResults.length }, "Tool loop iteration complete")
  }
  logger.warn({ loopCount }, "Max tool loops reached")
  return lastResponse
}
function getZAIConfig(userId?: string | null): ZAIConfig {
const configFile = getConfigFile(userId)
try {
if (existsSync(CONFIG_FILE)) {
const data = readFileSync(CONFIG_FILE, 'utf-8')
return JSON.parse(data)
console.log(`[Z.AI] Looking for config at: ${configFile} (user: ${userId || "default"})`)
if (existsSync(configFile)) {
const data = readFileSync(configFile, 'utf-8')
const parsed = JSON.parse(data)
console.log(`[Z.AI] Config loaded from file, enabled: ${parsed.enabled}`)
return parsed
}
return { enabled: false, endpoint: "https://api.z.ai/api/paas/v4", timeout: 300000 }
} catch {
return { enabled: false, endpoint: "https://api.z.ai/api/paas/v4", timeout: 300000 }
console.log(`[Z.AI] Config file not found, using defaults`)
return { enabled: false, endpoint: "https://api.z.ai/api/coding/paas/v4", timeout: 300000 }
} catch (error) {
console.error(`[Z.AI] Error reading config:`, error)
return { enabled: false, endpoint: "https://api.z.ai/api/coding/paas/v4", timeout: 300000 }
}
}
function updateZAIConfig(config: Partial<ZAIConfig>): void {
const current = getZAIConfig()
function updateZAIConfig(config: Partial<ZAIConfig>, userId?: string | null): void {
const configFile = getConfigFile(userId)
const configDir = getUserIntegrationsDir(userId || undefined)
// Ensure directory exists with proper error handling
try {
if (!existsSync(configDir)) {
console.log(`[Z.AI] Creating config directory: ${configDir}`)
mkdirSync(configDir, { recursive: true })
}
} catch (mkdirError) {
console.error(`[Z.AI] Failed to create config directory:`, mkdirError)
throw new Error(`Failed to create config directory: ${mkdirError}`)
}
const current = getZAIConfig(userId)
const updated = { ...current, ...config }
writeFileSync(CONFIG_FILE, JSON.stringify(updated, null, 2))
try {
console.log(`[Z.AI] Writing config to: ${configFile} (user: ${userId || "default"})`)
writeFileSync(configFile, JSON.stringify(updated, null, 2), 'utf-8')
console.log(`[Z.AI] Config saved successfully`)
} catch (writeError) {
console.error(`[Z.AI] Failed to write config file:`, writeError)
throw new Error(`Failed to write config file: ${writeError}`)
}
}

View File

@@ -8,6 +8,7 @@ const DEFAULT_INSTANCE_DATA: InstanceData = {
messageHistory: [],
agentModelSelections: {},
sessionTasks: {},
sessionMessages: {},
}
export class InstanceStore {

View File

@@ -0,0 +1,412 @@
/**
* Session Store - Native session management without OpenCode binary
*
* This provides a complete replacement for OpenCode's session management,
* allowing NomadArch to work in "Binary-Free Mode".
*/
import { readFile, writeFile, mkdir } from "fs/promises"
import { existsSync } from "fs"
import path from "path"
import { ulid } from "ulid"
import { createLogger } from "../logger"
const log = createLogger({ component: "session-store" })
// Types matching OpenCode's schema for compatibility
/** A single chat message persisted inside a session. */
export interface SessionMessage {
  id: string
  sessionId: string
  role: "user" | "assistant" | "system" | "tool"
  // Plain-text body; may be absent when the content lives in `parts`.
  content?: string
  // Structured fragments (text / tool calls / results / thinking / code).
  parts?: MessagePart[]
  createdAt: number
  updatedAt: number
  // Tool invocations requested by an assistant message.
  toolCalls?: ToolCall[]
  // For role "tool": the id of the call this message answers.
  toolCallId?: string
  status?: "pending" | "streaming" | "completed" | "error"
}

/** Message payload accepted from callers; id and timestamps are optional on input. */
type IncomingSessionMessage = Omit<SessionMessage, "id" | "sessionId" | "createdAt" | "updatedAt"> & {
  id?: string
  createdAt?: number
  updatedAt?: number
}

/** One structured fragment of a message. */
export interface MessagePart {
  type: "text" | "tool_call" | "tool_result" | "thinking" | "code"
  content?: string
  toolCall?: ToolCall
  toolResult?: ToolResult
}

/** Function-style tool call; `arguments` is a serialized string. */
export interface ToolCall {
  id: string
  type: "function"
  function: {
    name: string
    arguments: string
  }
}

/** Outcome of executing a ToolCall. */
export interface ToolResult {
  toolCallId: string
  content: string
  isError?: boolean
}

/** Session metadata plus the ordered list of its message IDs. */
export interface Session {
  id: string
  workspaceId: string
  title?: string
  parentId?: string | null
  createdAt: number
  updatedAt: number
  messageIds: string[]
  model?: {
    providerId: string
    modelId: string
  }
  agent?: string
  revert?: {
    messageID: string
    reason?: string
  } | null
}

/** Shape of a workspace's persisted sessions.json. */
export interface SessionStore {
  sessions: Record<string, Session>
  messages: Record<string, SessionMessage>
}
/**
* Native session management for Binary-Free Mode
*/
export class NativeSessionManager {
// In-memory cache of per-workspace stores; persisted on every mutation.
private stores = new Map<string, SessionStore>()
// Root directory under which each workspace gets its own sessions.json.
private dataDir: string

constructor(dataDir: string) {
  this.dataDir = dataDir
}

// Absolute path of a workspace's session store file.
private getStorePath(workspaceId: string): string {
  return path.join(this.dataDir, workspaceId, "sessions.json")
}

// Create the workspace's data directory on first use.
private async ensureDir(workspaceId: string): Promise<void> {
  const dir = path.join(this.dataDir, workspaceId)
  if (!existsSync(dir)) {
    await mkdir(dir, { recursive: true })
  }
}
// Load (and cache) the session store for a workspace. A corrupt or
// unreadable file is logged and replaced by an empty store in memory;
// the file on disk is only overwritten on the next save.
private async loadStore(workspaceId: string): Promise<SessionStore> {
  if (this.stores.has(workspaceId)) {
    return this.stores.get(workspaceId)!
  }
  const storePath = this.getStorePath(workspaceId)
  let store: SessionStore = { sessions: {}, messages: {} }
  if (existsSync(storePath)) {
    try {
      const data = await readFile(storePath, "utf-8")
      store = JSON.parse(data)
    } catch (error) {
      log.error({ workspaceId, error }, "Failed to load session store")
    }
  }
  this.stores.set(workspaceId, store)
  return store
}

// Persist the cached store as pretty-printed JSON. No-op when the
// workspace was never loaded.
// NOTE(review): concurrent saves are not serialized — assumes callers run
// one request at a time per workspace; confirm if that ever changes.
private async saveStore(workspaceId: string): Promise<void> {
  const store = this.stores.get(workspaceId)
  if (!store) return
  await this.ensureDir(workspaceId)
  const storePath = this.getStorePath(workspaceId)
  await writeFile(storePath, JSON.stringify(store, null, 2), "utf-8")
}
// Session CRUD operations
/** All sessions of a workspace, most recently updated first. */
async listSessions(workspaceId: string): Promise<Session[]> {
  const { sessions } = await this.loadStore(workspaceId)
  const all = Object.values(sessions)
  all.sort((first, second) => second.updatedAt - first.updatedAt)
  return all
}

/** Look up one session; null when it does not exist. */
async getSession(workspaceId: string, sessionId: string): Promise<Session | null> {
  const { sessions } = await this.loadStore(workspaceId)
  return sessions[sessionId] ?? null
}
/**
 * Create and persist a new session in the given workspace.
 * Title, parent, model and agent are all optional; sensible defaults apply.
 */
async createSession(workspaceId: string, options?: {
  title?: string
  parentId?: string
  model?: { providerId: string; modelId: string }
  agent?: string
}): Promise<Session> {
  const store = await this.loadStore(workspaceId)
  const timestamp = Date.now()
  const session: Session = {
    id: ulid(),
    workspaceId,
    title: options?.title ?? "New Session",
    parentId: options?.parentId ?? null,
    createdAt: timestamp,
    updatedAt: timestamp,
    messageIds: [],
    model: options?.model,
    agent: options?.agent,
  }
  store.sessions[session.id] = session
  await this.saveStore(workspaceId)
  log.info({ workspaceId, sessionId: session.id }, "Created new session")
  return session
}
/**
 * Patch a session's fields. Identity fields (`id`, `workspaceId`) are pinned
 * and `updatedAt` is bumped regardless of the patch contents.
 * Returns null when the session does not exist.
 */
async updateSession(workspaceId: string, sessionId: string, updates: Partial<Session>): Promise<Session | null> {
  const store = await this.loadStore(workspaceId)
  const existing = store.sessions[sessionId]
  if (!existing) return null
  const merged: Session = {
    ...existing,
    ...updates,
    id: existing.id, // Prevent ID change
    workspaceId: existing.workspaceId, // Prevent workspace change
    updatedAt: Date.now(),
  }
  store.sessions[sessionId] = merged
  await this.saveStore(workspaceId)
  return merged
}
/**
 * Remove a session together with every message it owns.
 * Returns false when the session does not exist, true after deletion.
 */
async deleteSession(workspaceId: string, sessionId: string): Promise<boolean> {
  const store = await this.loadStore(workspaceId)
  const target = store.sessions[sessionId]
  if (!target) return false
  // Delete all messages in the session
  target.messageIds.forEach((messageId) => {
    delete store.messages[messageId]
  })
  delete store.sessions[sessionId]
  await this.saveStore(workspaceId)
  log.info({ workspaceId, sessionId }, "Deleted session")
  return true
}
/**
 * Duplicate a session under a fresh ULID. The fork references the same
 * message ids (message records are not copied) and its parentId is the
 * original's parent when one exists, otherwise the original itself.
 * @throws when the source session does not exist.
 */
async forkSession(workspaceId: string, sessionId: string): Promise<Session> {
  const store = await this.loadStore(workspaceId)
  const source = store.sessions[sessionId]
  if (!source) throw new Error(`Session not found: ${sessionId}`)
  const timestamp = Date.now()
  const forkTitle = source.title ? `${source.title} (fork)` : "Forked Session"
  const fork: Session = {
    ...source,
    id: ulid(),
    title: forkTitle,
    parentId: source.parentId || source.id,
    createdAt: timestamp,
    updatedAt: timestamp,
    messageIds: source.messageIds.slice(), // Shallow copy of message IDs
  }
  store.sessions[fork.id] = fork
  await this.saveStore(workspaceId)
  return fork
}
/**
 * Roll a session back. With no messageId the most recent message is
 * dropped; with a messageId, every message AFTER it is dropped while the
 * target itself is kept. Removed messages are deleted from the store too.
 * @throws when the session does not exist.
 */
async revert(workspaceId: string, sessionId: string, messageId?: string): Promise<Session> {
  const store = await this.loadStore(workspaceId)
  const session = store.sessions[sessionId]
  if (!session) throw new Error(`Session not found: ${sessionId}`)
  if (messageId) {
    // Revert to specific message: remove everything after the anchor
    const anchor = session.messageIds.indexOf(messageId)
    if (anchor !== -1) {
      const removed = session.messageIds.splice(anchor + 1)
      removed.forEach((removedId) => {
        delete store.messages[removedId]
      })
    }
  } else if (session.messageIds.length > 0) {
    // Revert last message
    const lastId = session.messageIds.pop()
    if (lastId) delete store.messages[lastId]
  }
  session.updatedAt = Date.now()
  await this.saveStore(workspaceId)
  return session
}
// Message operations
async getSessionMessages(workspaceId: string, sessionId: string): Promise<SessionMessage[]> {
const store = await this.loadStore(workspaceId)
const session = store.sessions[sessionId]
if (!session) return []
return session.messageIds
.map(id => store.messages[id])
.filter((msg): msg is SessionMessage => msg !== undefined)
}
/**
 * Append a message to a session (id is generated when absent; an existing
 * id upserts without duplicating the ordering entry). Missing timestamps
 * default to now; the session's updatedAt mirrors the message's updatedAt.
 * @throws when the session does not exist.
 */
async addMessage(workspaceId: string, sessionId: string, message: IncomingSessionMessage): Promise<SessionMessage> {
  const store = await this.loadStore(workspaceId)
  const session = store.sessions[sessionId]
  if (!session) throw new Error(`Session not found: ${sessionId}`)
  const fallback = Date.now()
  const createdAt = typeof message.createdAt === "number" ? message.createdAt : fallback
  const updatedAt = typeof message.updatedAt === "number" ? message.updatedAt : createdAt
  const stored: SessionMessage = {
    ...message,
    id: message.id ?? ulid(),
    sessionId,
    createdAt,
    updatedAt,
  }
  store.messages[stored.id] = stored
  if (!session.messageIds.includes(stored.id)) {
    session.messageIds.push(stored.id)
  }
  session.updatedAt = updatedAt
  await this.saveStore(workspaceId)
  return stored
}
/**
 * Merge partial updates into a stored message. The id and sessionId are
 * pinned so the message cannot move, and updatedAt is refreshed.
 * Returns null for an unknown message id.
 */
async updateMessage(workspaceId: string, messageId: string, updates: Partial<SessionMessage>): Promise<SessionMessage | null> {
  const store = await this.loadStore(workspaceId)
  const existing = store.messages[messageId]
  if (!existing) return null
  const merged: SessionMessage = {
    ...existing,
    ...updates,
    id: existing.id, // Prevent ID change
    sessionId: existing.sessionId, // Prevent session change
    updatedAt: Date.now(),
  }
  store.messages[messageId] = merged
  await this.saveStore(workspaceId)
  return merged
}
// Utility
/**
 * Drop a workspace's in-memory store. The on-disk JSON file is left
 * untouched, so the data will be re-read on the next loadStore call.
 */
async clearWorkspace(workspaceId: string): Promise<void> {
  this.stores.delete(workspaceId)
  // Optionally delete file
}
/**
 * Count sessions for a workspace using only the in-memory cache.
 * Synchronous by design: a workspace whose store was never loaded
 * (or was cleared) reports 0 rather than touching the disk.
 */
getActiveSessionCount(workspaceId: string): number {
  const loaded = this.stores.get(workspaceId)
  if (!loaded) return 0
  return Object.keys(loaded.sessions).length
}
/**
 * Import sessions from SDK mode format - for migration when switching modes
 *
 * Idempotent with respect to session ids: any session whose id already
 * exists in the native store is counted as skipped, so the call can be
 * safely repeated. SDK-provided ids are reused verbatim for both sessions
 * and messages. Returns how many sessions were imported vs skipped.
 */
async importSessions(workspaceId: string, sessions: Array<{
  id: string
  title?: string
  parentId?: string | null
  createdAt?: number
  updatedAt?: number
  model?: { providerId: string; modelId: string }
  agent?: string
  messages?: Array<{
    id: string
    role: "user" | "assistant" | "system" | "tool"
    content?: string
    createdAt?: number
  }>
}>): Promise<{ imported: number; skipped: number }> {
  const store = await this.loadStore(workspaceId)
  let imported = 0
  let skipped = 0
  for (const sdkSession of sessions) {
    // Skip if session already exists
    if (store.sessions[sdkSession.id]) {
      skipped++
      continue
    }
    // Fallback timestamp when the SDK record carries none
    const now = Date.now()
    const session: Session = {
      id: sdkSession.id,
      workspaceId,
      title: sdkSession.title || "Imported Session",
      parentId: sdkSession.parentId ?? null,
      createdAt: sdkSession.createdAt || now,
      updatedAt: sdkSession.updatedAt || now,
      messageIds: [],
      model: sdkSession.model,
      agent: sdkSession.agent,
    }
    // Import messages if provided
    if (sdkSession.messages && Array.isArray(sdkSession.messages)) {
      for (const msg of sdkSession.messages) {
        const message: SessionMessage = {
          id: msg.id,
          sessionId: sdkSession.id,
          role: msg.role,
          content: msg.content,
          // SDK messages carry no updatedAt, so createdAt is mirrored
          createdAt: msg.createdAt || now,
          updatedAt: msg.createdAt || now,
          // NOTE(review): imported messages are assumed finished and marked
          // "completed" — confirm the SDK never exports in-flight messages
          status: "completed"
        }
        store.messages[msg.id] = message
        session.messageIds.push(msg.id)
      }
    }
    store.sessions[sdkSession.id] = session
    imported++
  }
  // One write for the whole batch rather than per-session
  await this.saveStore(workspaceId)
  log.info({ workspaceId, imported, skipped }, "Imported sessions from SDK mode")
  return { imported, skipped }
}
}
// Singleton instance
let sessionManager: NativeSessionManager | null = null

/**
 * Return the shared session manager, lazily constructing it on first use.
 * @param dataDir root directory for session storage; required on the very
 *   first call (ignored once the singleton exists).
 * @throws when first called without a dataDir to initialize with.
 */
export function getSessionManager(dataDir?: string): NativeSessionManager {
  if (sessionManager) return sessionManager
  if (!dataDir) {
    throw new Error("Session manager not initialized - provide dataDir")
  }
  sessionManager = new NativeSessionManager(dataDir)
  return sessionManager
}
/**
 * Force-create a fresh session manager rooted at dataDir, replacing any
 * existing singleton, and return it.
 */
export function initSessionManager(dataDir: string): NativeSessionManager {
  const manager = new NativeSessionManager(dataDir)
  sessionManager = manager
  return manager
}

View File

@@ -0,0 +1,352 @@
/**
* Tool Executor Service
* Provides MCP-compatible tool definitions and execution for all AI models.
* This enables Z.AI, Qwen, OpenCode Zen, etc. to write files, read files, and interact with the workspace.
*/
import fs from "fs"
import path from "path"
import { createLogger } from "../logger"
import { getMcpManager } from "../mcp/client"
const log = createLogger({ component: "tool-executor" })
// OpenAI-compatible Tool Definition Schema
// (the shape advertised to the model in a chat request's "tools" array)
export interface ToolDefinition {
  type: "function"
  function: {
    name: string
    description: string
    // JSON-Schema-style parameter description
    parameters: {
      type: "object"
      properties: Record<string, { type: string; description?: string }>
      required?: string[]
    }
  }
}
// Tool Call from LLM Response
export interface ToolCall {
  // Unique call id, echoed back in the matching ToolResult
  id: string
  type: "function"
  function: {
    name: string
    arguments: string // JSON string
  }
}
// Tool Execution Result
// (fed back to the model as a role:"tool" chat message)
export interface ToolResult {
  tool_call_id: string
  role: "tool"
  content: string
}
/**
 * Core Tool Definitions for MCP
 * These follow OpenAI's function calling schema (compatible with Z.AI GLM-4)
 *
 * Each entry is advertised to the model; when the model emits a matching
 * tool call it is dispatched by executeTool. All "path" parameters are
 * interpreted relative to the active workspace root.
 */
export const CORE_TOOLS: ToolDefinition[] = [
  // write_file: create or overwrite a text file
  {
    type: "function",
    function: {
      name: "write_file",
      description: "Write content to a file in the workspace. Creates the file if it doesn't exist, or overwrites if it does. Use this to generate code files, configuration, or any text content.",
      parameters: {
        type: "object",
        properties: {
          path: {
            type: "string",
            description: "Relative path to the file within the workspace (e.g., 'src/components/Button.tsx')"
          },
          content: {
            type: "string",
            description: "The full content to write to the file"
          }
        },
        required: ["path", "content"]
      }
    }
  },
  // read_file: fetch a file's contents
  {
    type: "function",
    function: {
      name: "read_file",
      description: "Read the contents of a file from the workspace.",
      parameters: {
        type: "object",
        properties: {
          path: {
            type: "string",
            description: "Relative path to the file within the workspace"
          }
        },
        required: ["path"]
      }
    }
  },
  // list_files: directory listing
  {
    type: "function",
    function: {
      name: "list_files",
      description: "List files and directories in a workspace directory.",
      parameters: {
        type: "object",
        properties: {
          path: {
            type: "string",
            description: "Relative path to the directory (use '.' for root)"
          }
        },
        required: ["path"]
      }
    }
  },
  // create_directory: mkdir -p equivalent
  {
    type: "function",
    function: {
      name: "create_directory",
      description: "Create a directory in the workspace. Creates parent directories if needed.",
      parameters: {
        type: "object",
        properties: {
          path: {
            type: "string",
            description: "Relative path to the directory to create"
          }
        },
        required: ["path"]
      }
    }
  },
  // delete_file: remove a single file
  {
    type: "function",
    function: {
      name: "delete_file",
      description: "Delete a file from the workspace.",
      parameters: {
        type: "object",
        properties: {
          path: {
            type: "string",
            description: "Relative path to the file to delete"
          }
        },
        required: ["path"]
      }
    }
  }
]
/**
 * Execute a tool call within a workspace context
 *
 * Dispatches the built-in file tools and proxies "mcp_"-prefixed names to
 * the MCP manager. Never throws: every failure is reported inside the
 * returned ToolResult's content so the agent loop can continue.
 */

/**
 * Resolve a workspace-relative path and verify it stays inside the
 * workspace. A raw string-prefix check (`fullPath.startsWith(root)`) is NOT
 * sufficient: "/ws-evil" starts with "/ws", so a sibling directory sharing
 * a prefix would slip through. path.relative is used instead — the result
 * escapes when it is "..", begins with "../", or is absolute.
 * @returns the absolute path, or null when the path escapes the workspace.
 */
function resolveInsideWorkspace(workspacePath: string, relativePath: string): string | null {
  const root = path.resolve(workspacePath)
  const fullPath = path.resolve(root, relativePath)
  const rel = path.relative(root, fullPath)
  if (rel === "") return fullPath // the workspace root itself
  const escapes = rel === ".." || rel.startsWith(`..${path.sep}`) || path.isAbsolute(rel)
  return escapes ? null : fullPath
}

/** Build the standard "path escapes workspace" error result. */
function escapedPathResult(id: string, relativePath: string): ToolResult {
  return {
    tool_call_id: id,
    role: "tool",
    content: `Error: Path escapes workspace boundary: ${relativePath}`
  }
}

export async function executeTool(
  workspacePath: string,
  toolCall: ToolCall
): Promise<ToolResult> {
  const { id, function: fn } = toolCall
  const name = fn.name
  let args: Record<string, unknown>
  try {
    args = JSON.parse(fn.arguments)
  } catch (e) {
    return {
      tool_call_id: id,
      role: "tool",
      content: `Error: Failed to parse tool arguments: ${fn.arguments}`
    }
  }
  log.info({ tool: name, args, workspacePath }, "Executing tool")
  try {
    switch (name) {
      case "write_file": {
        const relativePath = String(args.path || "")
        const content = String(args.content || "")
        // Security check: ensure we're still within workspace
        const fullPath = resolveInsideWorkspace(workspacePath, relativePath)
        if (fullPath === null) {
          return escapedPathResult(id, relativePath)
        }
        // Ensure parent directory exists
        const dir = path.dirname(fullPath)
        if (!fs.existsSync(dir)) {
          fs.mkdirSync(dir, { recursive: true })
        }
        fs.writeFileSync(fullPath, content, "utf-8")
        log.info({ path: relativePath, bytes: content.length }, "File written successfully")
        return {
          tool_call_id: id,
          role: "tool",
          content: `Successfully wrote ${content.length} bytes to ${relativePath}`
        }
      }
      case "read_file": {
        const relativePath = String(args.path || "")
        const fullPath = resolveInsideWorkspace(workspacePath, relativePath)
        if (fullPath === null) {
          return escapedPathResult(id, relativePath)
        }
        if (!fs.existsSync(fullPath)) {
          return {
            tool_call_id: id,
            role: "tool",
            content: `Error: File not found: ${relativePath}`
          }
        }
        const content = fs.readFileSync(fullPath, "utf-8")
        return {
          tool_call_id: id,
          role: "tool",
          content: content.slice(0, 50000) // Limit to prevent context overflow
        }
      }
      case "list_files": {
        const relativePath = String(args.path || ".")
        const fullPath = resolveInsideWorkspace(workspacePath, relativePath)
        if (fullPath === null) {
          return escapedPathResult(id, relativePath)
        }
        if (!fs.existsSync(fullPath)) {
          return {
            tool_call_id: id,
            role: "tool",
            content: `Error: Directory not found: ${relativePath}`
          }
        }
        const entries = fs.readdirSync(fullPath, { withFileTypes: true })
        // Directories carry a trailing "/" so the model can tell them apart
        const listing = entries.map(e =>
          e.isDirectory() ? `${e.name}/` : e.name
        ).join("\n")
        return {
          tool_call_id: id,
          role: "tool",
          content: listing || "(empty directory)"
        }
      }
      case "create_directory": {
        const relativePath = String(args.path || "")
        const fullPath = resolveInsideWorkspace(workspacePath, relativePath)
        if (fullPath === null) {
          return escapedPathResult(id, relativePath)
        }
        fs.mkdirSync(fullPath, { recursive: true })
        return {
          tool_call_id: id,
          role: "tool",
          content: `Successfully created directory: ${relativePath}`
        }
      }
      case "delete_file": {
        const relativePath = String(args.path || "")
        const fullPath = resolveInsideWorkspace(workspacePath, relativePath)
        if (fullPath === null) {
          return escapedPathResult(id, relativePath)
        }
        if (!fs.existsSync(fullPath)) {
          return {
            tool_call_id: id,
            role: "tool",
            content: `Error: File not found: ${relativePath}`
          }
        }
        fs.unlinkSync(fullPath)
        return {
          tool_call_id: id,
          role: "tool",
          content: `Successfully deleted: ${relativePath}`
        }
      }
      default: {
        // Check if this is an MCP tool (format: mcp_servername_toolname)
        if (name.startsWith("mcp_")) {
          try {
            const mcpManager = getMcpManager()
            const result = await mcpManager.executeTool(name, args)
            return {
              tool_call_id: id,
              role: "tool",
              content: result
            }
          } catch (mcpError) {
            const message = mcpError instanceof Error ? mcpError.message : String(mcpError)
            return {
              tool_call_id: id,
              role: "tool",
              content: `MCP tool error: ${message}`
            }
          }
        }
        return {
          tool_call_id: id,
          role: "tool",
          content: `Error: Unknown tool: ${name}`
        }
      }
    }
  } catch (error) {
    const message = error instanceof Error ? error.message : String(error)
    log.error({ tool: name, error: message }, "Tool execution failed")
    return {
      tool_call_id: id,
      role: "tool",
      content: `Error executing ${name}: ${message}`
    }
  }
}
/**
 * Execute multiple tool calls in parallel
 *
 * Results come back in the same order as the incoming calls. Because
 * executeTool reports failures inside its ToolResult rather than throwing,
 * a single failing call does not reject the whole batch.
 */
export async function executeTools(
  workspacePath: string,
  toolCalls: ToolCall[]
): Promise<ToolResult[]> {
  const pending = toolCalls.map((call) => executeTool(workspacePath, call))
  return Promise.all(pending)
}

View File

@@ -0,0 +1,13 @@
/**
* Tools Module Index
* Exports MCP-compatible tool definitions and executor for AI agent integration.
*/
export {
CORE_TOOLS,
executeTool,
executeTools,
type ToolDefinition,
type ToolCall,
type ToolResult
} from "./executor"

View File

@@ -0,0 +1,101 @@
/**
* User Context Module
* Manages the active user context for per-user config isolation
*/
import path from "path"
import os from "os"
import { existsSync, mkdirSync } from "fs"
const CONFIG_ROOT = path.join(os.homedir(), ".config", "codenomad")
const USERS_ROOT = path.join(CONFIG_ROOT, "users")
// Active user ID (set by the main process or HTTP header)
let activeUserId: string | null = null

/**
 * Set the active user ID
 *
 * Pass null to clear the active user; the change is logged either way.
 */
export function setActiveUserId(userId: string | null): void {
  activeUserId = userId
  const label = userId || "(none)"
  console.log(`[UserContext] Active user set to: ${label}`)
}

/**
 * Get the active user ID
 */
export function getActiveUserId(): string | null {
  return activeUserId
}
/**
 * Get the data root for a specific user
 * Falls back to global config if no user is set
 *
 * Resolution order: explicit userId argument, then the module-level
 * active user, then the CODENOMAD_USER_DIR environment override, and
 * finally the global config root.
 */
export function getUserDataRoot(userId?: string): string {
  const effectiveUserId = userId || activeUserId
  if (effectiveUserId) {
    return path.join(USERS_ROOT, effectiveUserId)
  }
  // Prioritize environment variable if set (from Electron)
  const envDir = process.env.CODENOMAD_USER_DIR
  if (envDir && envDir.trim().length > 0) {
    return path.resolve(envDir)
  }
  // Fallback to global config root
  return CONFIG_ROOT
}
/**
 * Get the integrations directory for the current or specified user
 *
 * The directory is created on first access. Creation failures are logged
 * and swallowed so the caller still receives the expected path.
 */
export function getUserIntegrationsDir(userId?: string): string {
  const integrationsDir = path.join(getUserDataRoot(userId), "integrations")
  // Ensure directory exists
  if (!existsSync(integrationsDir)) {
    try {
      mkdirSync(integrationsDir, { recursive: true })
      console.log(`[UserContext] Created integrations dir: ${integrationsDir}`)
    } catch (e) {
      console.error(`[UserContext] Failed to create integrations dir:`, e)
    }
  }
  return integrationsDir
}
/**
 * Get the instances directory for the current or specified user
 * (not created here — callers are responsible for ensuring it exists)
 */
export function getUserInstancesDir(userId?: string): string {
  return path.join(getUserDataRoot(userId), "instances")
}
/**
* Get the config file path for a specific integration
*/
export function getIntegrationConfigPath(integrationId: string, userId?: string): string {
const integrationsDir = getUserIntegrationsDir(userId)
return path.join(integrationsDir, `${integrationId}-config.json`)
}
/**
 * Extract user ID from request headers
 *
 * Reads the "x-user-id" header (first entry when it repeats) and falls
 * back to the process-wide active user when the header is missing or an
 * empty string.
 */
export function getUserIdFromRequest(request: { headers?: Record<string, string | string[] | undefined> }): string | null {
  const raw = request.headers?.["x-user-id"]
  if (typeof raw === "string") {
    return raw.length > 0 ? raw : activeUserId
  }
  if (Array.isArray(raw) && raw.length > 0) {
    return raw[0]
  }
  return activeUserId
}

View File

@@ -1,14 +1,8 @@
import os from "os"
import { getUserDataRoot as getRoot, getUserInstancesDir as getInstances, getUserIntegrationsDir as getIntegrations } from "./user-context"
import path from "path"
const DEFAULT_ROOT = path.join(os.homedir(), ".config", "codenomad")
export function getUserDataRoot(): string {
const override = process.env.CODENOMAD_USER_DIR
if (override && override.trim().length > 0) {
return path.resolve(override)
}
return DEFAULT_ROOT
return getRoot()
}
export function getUserConfigPath(): string {
@@ -16,11 +10,11 @@ export function getUserConfigPath(): string {
}
export function getUserInstancesDir(): string {
return path.join(getUserDataRoot(), "instances")
return getInstances()
}
export function getUserIntegrationsDir(): string {
return path.join(getUserDataRoot(), "integrations")
return getIntegrations()
}
export function getOpencodeWorkspacesRoot(): string {

View File

@@ -11,6 +11,7 @@ import { WorkspaceDescriptor, WorkspaceFileResponse, FileSystemEntry } from "../
import { WorkspaceRuntime, ProcessExitInfo } from "./runtime"
import { Logger } from "../logger"
import { ensureWorkspaceOpencodeConfig } from "../opencode-config"
import { getContextEngineService } from "../context-engine"
const STARTUP_STABILITY_DELAY_MS = 1500
@@ -22,7 +23,7 @@ interface WorkspaceManagerOptions {
logger: Logger
}
interface WorkspaceRecord extends WorkspaceDescriptor {}
interface WorkspaceRecord extends WorkspaceDescriptor { }
export class WorkspaceManager {
private readonly workspaces = new Map<string, WorkspaceRecord>()
@@ -44,6 +45,31 @@ export class WorkspaceManager {
return this.workspaces.get(id)?.port
}
/**
* Generate a deterministic workspace ID based on folder path
* This ensures the same folder always gets the same workspace ID,
* allowing sessions to persist across app restarts
*/
private generateDeterministicId(folderPath: string): string {
// Normalize the path for consistent hashing across platforms
const normalizedPath = folderPath.replace(/\\/g, '/').toLowerCase()
// Simple hash function to create a short, deterministic ID
let hash = 0
for (let i = 0; i < normalizedPath.length; i++) {
const char = normalizedPath.charCodeAt(i)
hash = ((hash << 5) - hash) + char
hash = hash & hash // Convert to 32bit integer
}
// Convert to base36 and ensure positive
const hashStr = Math.abs(hash).toString(36)
// Return a short but unique ID
return hashStr.padStart(8, '0')
}
listFiles(workspaceId: string, relativePath = "."): FileSystemEntry[] {
const workspace = this.requireWorkspace(workspaceId)
const browser = new FileSystemBrowser({ rootDir: workspace.path })
@@ -67,14 +93,35 @@ export class WorkspaceManager {
}
async create(folder: string, name?: string): Promise<WorkspaceDescriptor> {
// Special constant for Native mode (no OpenCode binary)
const NATIVE_MODE_PATH = "__nomadarch_native__"
const id = `${Date.now().toString(36)}`
const binary = this.options.binaryRegistry.resolveDefault()
const resolvedBinaryPath = this.resolveBinaryPath(binary.path)
const workspacePath = path.isAbsolute(folder) ? folder : path.resolve(this.options.rootDir, folder)
// Generate a deterministic workspace ID based on the folder path
// This ensures the same folder always gets the same ID, allowing sessions to persist
const id = this.generateDeterministicId(workspacePath)
// Check if workspace already exists - if so, return the existing one
const existingWorkspace = this.workspaces.get(id)
if (existingWorkspace && existingWorkspace.status === "ready") {
this.options.logger.info({ workspaceId: id }, "Reusing existing workspace")
return existingWorkspace
}
clearWorkspaceSearchCache(workspacePath)
this.options.logger.info({ workspaceId: id, folder: workspacePath, binary: resolvedBinaryPath }, "Creating workspace")
// Check if we're in native mode
const isNativeMode = resolvedBinaryPath === NATIVE_MODE_PATH || binary.path === NATIVE_MODE_PATH
this.options.logger.info({
workspaceId: id,
folder: workspacePath,
binary: resolvedBinaryPath,
isNativeMode
}, "Creating workspace")
const proxyPath = `/workspaces/${id}/instance`
@@ -86,13 +133,14 @@ export class WorkspaceManager {
status: "starting",
proxyPath,
binaryId: resolvedBinaryPath,
binaryLabel: binary.label,
binaryVersion: binary.version,
binaryLabel: isNativeMode ? "NomadArch Native" : binary.label,
binaryVersion: isNativeMode ? "Native" : binary.version,
createdAt: new Date().toISOString(),
updatedAt: new Date().toISOString(),
}
if (!descriptor.binaryVersion) {
// Native mode doesn't need binary version detection
if (!isNativeMode && !descriptor.binaryVersion) {
descriptor.binaryVersion = this.detectBinaryVersion(resolvedBinaryPath)
}
@@ -101,6 +149,31 @@ export class WorkspaceManager {
this.options.eventBus.publish({ type: "workspace.created", workspace: descriptor })
// In native mode, we don't launch a binary - the workspace is immediately ready
// and uses native session management APIs instead
if (isNativeMode) {
this.options.logger.info({ workspaceId: id }, "Starting native mode workspace (no binary)")
// Native mode is immediately ready - no process to launch
descriptor.status = "ready"
descriptor.updatedAt = new Date().toISOString()
// No pid or port for native mode - it uses the server's own APIs
this.options.eventBus.publish({ type: "workspace.started", workspace: descriptor })
this.options.logger.info({ workspaceId: id }, "Native mode workspace ready")
// Trigger Context-Engine indexing (non-blocking)
const contextEngine = getContextEngineService()
if (contextEngine) {
contextEngine.indexPath(workspacePath).catch((error) => {
this.options.logger.warn({ workspaceId: id, error }, "Context-Engine indexing failed")
})
}
return descriptor
}
// SDK/binary mode - launch the OpenCode process
const preferences = this.options.configStore.get().preferences ?? {}
const userEnvironment = preferences.environmentVariables ?? {}
const opencodeConfigDir = ensureWorkspaceOpencodeConfig(id)
@@ -126,6 +199,15 @@ export class WorkspaceManager {
descriptor.updatedAt = new Date().toISOString()
this.options.eventBus.publish({ type: "workspace.started", workspace: descriptor })
this.options.logger.info({ workspaceId: id, port }, "Workspace ready")
// Trigger Context-Engine indexing (non-blocking)
const contextEngine = getContextEngineService()
if (contextEngine) {
contextEngine.indexPath(workspacePath).catch((error) => {
this.options.logger.warn({ workspaceId: id, error }, "Context-Engine indexing failed")
})
}
return descriptor
} catch (error) {
descriptor.status = "error"

View File

@@ -28,11 +28,17 @@ interface ManagedProcess {
export class WorkspaceRuntime {
private processes = new Map<string, ManagedProcess>()
constructor(private readonly eventBus: EventBus, private readonly logger: Logger) {}
constructor(private readonly eventBus: EventBus, private readonly logger: Logger) { }
async launch(options: LaunchOptions): Promise<{ pid: number; port: number; exitPromise: Promise<ProcessExitInfo>; getLastOutput: () => string }> {
this.validateFolder(options.folder)
// Native mode should not use the runtime - it uses native session management instead
const NATIVE_MODE_PATH = "__nomadarch_native__"
if (options.binaryPath === NATIVE_MODE_PATH) {
throw new Error(`Native mode does not use binary launches. Use native session management APIs instead.`)
}
// Check if binary exists before attempting to launch
try {
accessSync(options.binaryPath, constants.F_OK)
@@ -58,7 +64,23 @@ export class WorkspaceRuntime {
const exitPromise = new Promise<ProcessExitInfo>((resolveExit) => {
exitResolve = resolveExit
})
let lastOutput = ""
// Store recent output for debugging - keep last 20 lines from each stream
const MAX_OUTPUT_LINES = 20
const recentStdout: string[] = []
const recentStderr: string[] = []
const getLastOutput = () => {
const combined: string[] = []
if (recentStderr.length > 0) {
combined.push("=== STDERR ===")
combined.push(...recentStderr.slice(-10))
}
if (recentStdout.length > 0) {
combined.push("=== STDOUT ===")
combined.push(...recentStdout.slice(-10))
}
return combined.join("\n")
}
return new Promise((resolve, reject) => {
this.logger.info(
@@ -149,7 +171,13 @@ export class WorkspaceRuntime {
for (const line of lines) {
const trimmed = line.trim()
if (!trimmed) continue
lastOutput = trimmed
// Store in recent buffer for debugging
recentStdout.push(trimmed)
if (recentStdout.length > MAX_OUTPUT_LINES) {
recentStdout.shift()
}
this.emitLog(options.workspaceId, "info", line)
if (!portFound) {
@@ -165,7 +193,6 @@ export class WorkspaceRuntime {
child.removeListener("error", handleError)
const port = parseInt(portMatch[1], 10)
this.logger.info({ workspaceId: options.workspaceId, port, matchedLine: trimmed }, "Workspace runtime allocated port - PORT DETECTED")
const getLastOutput = () => lastOutput.trim()
resolve({ pid: child.pid!, port, exitPromise, getLastOutput })
} else {
this.logger.debug({ workspaceId: options.workspaceId, line: trimmed }, "Port detection - no match in this line")
@@ -183,7 +210,13 @@ export class WorkspaceRuntime {
for (const line of lines) {
const trimmed = line.trim()
if (!trimmed) continue
lastOutput = `[stderr] ${trimmed}`
// Store in recent buffer for debugging
recentStderr.push(trimmed)
if (recentStderr.length > MAX_OUTPUT_LINES) {
recentStderr.shift()
}
this.emitLog(options.workspaceId, "error", line)
}
})

View File

@@ -45,6 +45,7 @@ import {
clearActiveParentSession,
createSession,
fetchSessions,
flushSessionPersistence,
updateSessionAgent,
updateSessionModel,
} from "./stores/sessions"
@@ -217,6 +218,7 @@ const App: Component = () => {
if (!confirmed) return
clearActiveParentSession(instanceId)
await stopInstance(instanceId)
}
@@ -244,6 +246,12 @@ const App: Component = () => {
return
}
try {
await flushSessionPersistence(instanceId)
} catch (error) {
log.error("Failed to flush session persistence before closing", error)
}
clearActiveParentSession(instanceId)
try {
@@ -462,15 +470,6 @@ const App: Component = () => {
<RemoteAccessOverlay open={remoteAccessOpen()} onClose={() => setRemoteAccessOpen(false)} />
<AlertDialog />
<Toaster
position="top-right"
gutter={16}
toastOptions={{
duration: 8000,
className: "bg-transparent border-none shadow-none p-0",
}}
/>
</div>
</>
)

View File

@@ -6,6 +6,8 @@ import OllamaCloudSettings from "./settings/OllamaCloudSettings"
import QwenCodeSettings from "./settings/QwenCodeSettings"
import ZAISettings from "./settings/ZAISettings"
import OpenCodeZenSettings from "./settings/OpenCodeZenSettings"
import AntigravitySettings from "./settings/AntigravitySettings"
import ApiStatusChecker from "./settings/ApiStatusChecker"
interface AdvancedSettingsModalProps {
open: boolean
@@ -16,7 +18,7 @@ interface AdvancedSettingsModalProps {
}
const AdvancedSettingsModal: Component<AdvancedSettingsModalProps> = (props) => {
const [activeTab, setActiveTab] = createSignal("general")
const [activeTab, setActiveTab] = createSignal("api-status")
return (
<Dialog open={props.open} onOpenChange={(open) => !open && props.onClose()}>
@@ -30,6 +32,15 @@ const AdvancedSettingsModal: Component<AdvancedSettingsModalProps> = (props) =>
<div class="border-b" style={{ "border-color": "var(--border-base)" }}>
<div class="flex w-full px-6 overflow-x-auto">
<button
class={`px-4 py-2 text-sm font-medium border-b-2 whitespace-nowrap ${activeTab() === "api-status"
? "border-green-500 text-green-400"
: "border-transparent hover:border-gray-300"
}`}
onClick={() => setActiveTab("api-status")}
>
📊 API Status
</button>
<button
class={`px-4 py-2 text-sm font-medium border-b-2 whitespace-nowrap ${activeTab() === "zen"
? "border-orange-500 text-orange-400"
@@ -75,10 +86,33 @@ const AdvancedSettingsModal: Component<AdvancedSettingsModalProps> = (props) =>
>
Z.AI
</button>
<button
class={`px-4 py-2 text-sm font-medium border-b-2 whitespace-nowrap ${activeTab() === "antigravity"
? "border-purple-500 text-purple-400"
: "border-transparent hover:border-gray-300"
}`}
onClick={() => setActiveTab("antigravity")}
>
🚀 Antigravity
</button>
</div>
</div>
<div class="flex-1 overflow-y-auto">
<Show when={activeTab() === "api-status"}>
<div class="p-6">
<ApiStatusChecker
onSettingsClick={(apiId) => {
if (apiId === "opencode-zen") setActiveTab("zen")
else if (apiId === "ollama-cloud") setActiveTab("ollama")
else if (apiId === "zai") setActiveTab("zai")
else if (apiId === "qwen-oauth") setActiveTab("qwen")
else if (apiId === "antigravity") setActiveTab("antigravity")
}}
/>
</div>
</Show>
<Show when={activeTab() === "zen"}>
<OpenCodeZenSettings />
</Show>
@@ -115,6 +149,10 @@ const AdvancedSettingsModal: Component<AdvancedSettingsModalProps> = (props) =>
<Show when={activeTab() === "zai"}>
<ZAISettings />
</Show>
<Show when={activeTab() === "antigravity"}>
<AntigravitySettings />
</Show>
</div>
<div class="px-6 py-4 border-t flex justify-end" style={{ "border-color": "var(--border-base)" }}>

View File

@@ -0,0 +1,528 @@
import { Component, createSignal, onMount, For, Show } from "solid-js"
import { Lock, User, ShieldCheck, Cpu, UserPlus, KeyRound, ArrowLeft, Ghost } from "lucide-solid"
import toast from "solid-toast"
import { isElectronHost } from "../../lib/runtime-env"
import { setActiveUserId } from "../../lib/user-context"
/** A user identity as returned by the Electron user-management bridge. */
interface UserRecord {
  id: string
  name: string
  // Set for throwaway identities created via the guest-login path
  isGuest?: boolean
}
/** Props for the LoginView component. */
interface LoginViewProps {
  // Invoked after any successful login (password, guest, or web fallback)
  onLoginSuccess: (user: UserRecord) => void
}
// Which form the view currently shows
type ViewMode = "login" | "register" | "reset"
const LoginView: Component<LoginViewProps> = (props) => {
const [users, setUsers] = createSignal<UserRecord[]>([])
const [username, setUsername] = createSignal("")
const [password, setPassword] = createSignal("")
const [confirmPassword, setConfirmPassword] = createSignal("")
const [newPassword, setNewPassword] = createSignal("")
const [isLoading, setIsLoading] = createSignal(false)
const [mode, setMode] = createSignal<ViewMode>("login")
const getApi = () => {
const api = (window as any).electronAPI
return api
}
const loadUsers = async () => {
try {
if (isElectronHost()) {
const api = getApi()
if (api?.listUsers) {
const userList = await api.listUsers()
if (userList && Array.isArray(userList)) {
setUsers(userList)
if (userList.length > 0 && !username()) {
setUsername(userList[0].name)
}
}
}
}
} catch (error) {
console.error("Failed to fetch users:", error)
}
}
onMount(loadUsers)
const resetForm = () => {
setPassword("")
setConfirmPassword("")
setNewPassword("")
}
const handleLogin = async (e: Event) => {
e.preventDefault()
const name = username().trim()
if (!name) {
toast.error("Identity required")
return
}
setIsLoading(true)
try {
if (isElectronHost()) {
const api = getApi()
if (!api?.listUsers || !api?.loginUser) {
toast.error("API bridge not ready")
return
}
const userList = await api.listUsers()
const user = userList?.find((u: UserRecord) => u.name.toLowerCase() === name.toLowerCase())
if (!user) {
toast.error(`Identity "${name}" not found`)
return
}
const result = await api.loginUser({
id: user.id,
password: password(),
})
if (result?.success) {
toast.success(`Welcome back, ${result.user.name}!`)
setActiveUserId(result.user.id)
props.onLoginSuccess(result.user)
} else {
toast.error("Invalid access key")
}
} else {
toast.success("Web mode access granted")
props.onLoginSuccess({ id: "web-user", name: username() || "Web Explorer" })
}
} catch (error) {
console.error("Login failed:", error)
toast.error("Authentication failed")
} finally {
setIsLoading(false)
}
}
const handleGuestLogin = async () => {
setIsLoading(true)
try {
const api = getApi()
if (api?.createGuest) {
const guestUser = await api.createGuest()
if (guestUser?.id) {
toast.success(`Welcome, ${guestUser.name}!`)
setActiveUserId(guestUser.id)
props.onLoginSuccess(guestUser)
} else {
toast.error("Failed to create guest session")
}
} else {
// Web mode fallback
const guestId = `guest-${Date.now()}`
toast.success("Guest session started")
props.onLoginSuccess({ id: guestId, name: "Guest", isGuest: true })
}
} catch (error) {
console.error("Guest login failed:", error)
toast.error("Guest login failed")
} finally {
setIsLoading(false)
}
}
/**
 * Create a new identity after validating the form: username present and
 * >= 3 chars, password present, >= 4 chars, matching confirmation, and
 * no case-insensitive name collision with an existing user.
 */
const handleRegister = async (e: Event) => {
  e.preventDefault()
  const name = username().trim()
  // Evaluate the field checks in order and surface the first failure.
  const validationError =
    !name ? "Username required"
    : name.length < 3 ? "Username must be at least 3 characters"
    : !password() ? "Password required"
    : password().length < 4 ? "Password must be at least 4 characters"
    : password() !== confirmPassword() ? "Passwords do not match"
    : null
  if (validationError) {
    toast.error(validationError)
    return
  }
  // Check if user already exists (case-insensitive)
  const duplicate = users().some(u => u.name.toLowerCase() === name.toLowerCase())
  if (duplicate) {
    toast.error(`Identity "${name}" already exists`)
    return
  }
  setIsLoading(true)
  try {
    const api = getApi()
    if (!api?.createUser) {
      toast.error("Registration unavailable")
      return
    }
    const created = await api.createUser({ name, password: password() })
    if (created?.id) {
      toast.success(`Identity "${name}" created successfully!`)
      await loadUsers()
      setMode("login")
      setUsername(name)
      resetForm()
    } else {
      toast.error("Failed to create identity")
    }
  } catch (error) {
    console.error("Registration failed:", error)
    toast.error("Registration failed")
  } finally {
    setIsLoading(false)
  }
}
/**
 * Change an identity's password: validates the form, verifies the
 * current password via `loginUser`, then persists the new one via
 * `updateUser`. On success, returns the user to the login view.
 */
const handleResetPassword = async (e: Event) => {
  e.preventDefault()
  const name = username().trim()
  if (!name) {
    toast.error("Select an identity first")
    return
  }
  if (!password()) {
    toast.error("Current password required")
    return
  }
  if (!newPassword()) {
    toast.error("New password required")
    return
  }
  if (newPassword().length < 4) {
    toast.error("New password must be at least 4 characters")
    return
  }
  const user = users().find(u => u.name.toLowerCase() === name.toLowerCase())
  if (!user) {
    toast.error(`Identity "${name}" not found`)
    return
  }
  setIsLoading(true)
  try {
    const api = getApi()
    // FIX: every sibling handler guards against a missing API bridge,
    // but this one previously called api.loginUser unconditionally and
    // would throw a TypeError in web mode. Fail fast instead.
    if (!api?.loginUser || !api?.updateUser) {
      toast.error("Password reset unavailable")
      return
    }
    // First verify current password
    const verifyResult = await api.loginUser({
      id: user.id,
      password: password(),
    })
    if (!verifyResult?.success) {
      toast.error("Current password is incorrect")
      return
    }
    // Update password
    const updateResult = await api.updateUser({
      id: user.id,
      password: newPassword(),
    })
    if (updateResult?.id) {
      toast.success("Password updated successfully!")
      setMode("login")
      resetForm()
    } else {
      toast.error("Failed to update password")
    }
  } catch (error) {
    console.error("Password reset failed:", error)
    toast.error("Password reset failed")
  } finally {
    setIsLoading(false)
  }
}
// Navigate between the login / register / reset views. Password fields
// are always cleared; registration additionally starts with a blank
// username so suggestions from the login form don't leak in.
const switchMode = (newMode: ViewMode) => {
  setMode(newMode)
  resetForm()
  if (newMode === "register") setUsername("")
}
return (
<div class="fixed inset-0 z-[9999] flex items-center justify-center bg-[#0a0a0a]">
{/* Dynamic Background */}
<div class="absolute inset-0 overflow-hidden pointer-events-none opacity-20">
<div class="absolute -top-[10%] -left-[10%] w-[40%] h-[40%] bg-blue-500/20 blur-[120px] rounded-full animate-pulse" />
<div class="absolute -bottom-[10%] -right-[10%] w-[40%] h-[40%] bg-purple-500/20 blur-[120px] rounded-full animate-pulse delay-700" />
</div>
<div class="relative w-full max-w-md px-6 py-10 bg-[#141414]/80 backdrop-blur-xl border border-white/10 rounded-3xl shadow-2xl">
{/* Logo & Header */}
<div class="flex flex-col items-center mb-8">
<div class="w-16 h-16 mb-4 bg-gradient-to-br from-blue-500 via-indigo-600 to-purple-700 p-0.5 rounded-2xl shadow-lg transform rotate-3">
<div class="w-full h-full bg-[#141414] rounded-2xl flex items-center justify-center">
<Cpu class="w-8 h-8 text-white" />
</div>
</div>
<h1 class="text-2xl font-bold text-white tracking-tight mb-1">NomadArch</h1>
<p class="text-gray-400 text-sm">
{mode() === "login" && "Secure Neural Access Point"}
{mode() === "register" && "Create New Identity"}
{mode() === "reset" && "Reset Access Key"}
</p>
</div>
{/* Back button for non-login modes */}
<Show when={mode() !== "login"}>
<button
type="button"
onClick={() => switchMode("login")}
class="flex items-center gap-2 text-gray-400 hover:text-white transition-colors mb-4"
>
<ArrowLeft class="w-4 h-4" />
<span class="text-sm">Back to login</span>
</button>
</Show>
{/* Login Form */}
<Show when={mode() === "login"}>
<form onSubmit={handleLogin} class="space-y-5">
<div class="space-y-1.5">
<label class="text-xs font-semibold text-gray-500 uppercase tracking-wider ml-1">Identity</label>
<div class="relative group">
<div class="absolute inset-y-0 left-0 pl-4 flex items-center pointer-events-none">
<User class="w-5 h-5 text-gray-500 group-focus-within:text-blue-500 transition-colors" />
</div>
<input
type="text"
placeholder="Username"
value={username()}
onInput={(e) => setUsername(e.currentTarget.value)}
class="block w-full pl-12 pr-4 py-3.5 bg-[#1a1a1a] border border-white/5 rounded-2xl text-white focus:outline-none focus:ring-2 focus:ring-blue-500/50 transition-all"
list="identity-suggestions"
/>
<datalist id="identity-suggestions">
<For each={users()}>{(user) => <option value={user.name} />}</For>
</datalist>
</div>
</div>
<div class="space-y-1.5">
<label class="text-xs font-semibold text-gray-500 uppercase tracking-wider ml-1">Access Key</label>
<div class="relative group">
<div class="absolute inset-y-0 left-0 pl-4 flex items-center pointer-events-none">
<Lock class="w-5 h-5 text-gray-500 group-focus-within:text-blue-500 transition-colors" />
</div>
<input
type="password"
placeholder="Password"
value={password()}
onInput={(e) => setPassword(e.currentTarget.value)}
class="block w-full pl-12 pr-4 py-3.5 bg-[#1a1a1a] border border-white/5 rounded-2xl text-white placeholder-gray-600 focus:outline-none focus:ring-2 focus:ring-blue-500/50 transition-all font-mono"
/>
</div>
</div>
<button
type="submit"
disabled={isLoading() || !username()}
class="w-full flex items-center justify-center gap-3 py-3.5 bg-gradient-to-r from-blue-600 via-indigo-600 to-purple-600 hover:from-blue-500 hover:to-purple-500 text-white font-bold rounded-2xl shadow-xl transform active:scale-[0.98] transition-all disabled:opacity-50 disabled:cursor-not-allowed"
>
<Show when={isLoading()} fallback={<><ShieldCheck class="w-5 h-5" /><span>Verify Identity</span></>}>
<div class="w-5 h-5 border-2 border-white/30 border-t-white rounded-full animate-spin" />
<span>Verifying...</span>
</Show>
</button>
</form>
<div class="mt-6 flex flex-col gap-4">
<button
type="button"
onClick={handleGuestLogin}
disabled={isLoading()}
class="w-full flex items-center justify-center gap-2 py-3 bg-[#1a1a1a] hover:bg-[#252525] border border-white/10 text-gray-300 hover:text-white font-medium rounded-2xl transition-all disabled:opacity-50"
>
<Ghost class="w-5 h-5" />
<span>Continue as Guest</span>
</button>
<div class="flex items-center justify-between text-sm">
<button
type="button"
onClick={() => switchMode("register")}
class="flex items-center gap-1.5 text-gray-400 hover:text-blue-400 transition-colors"
>
<UserPlus class="w-4 h-4" />
<span>Create Identity</span>
</button>
<button
type="button"
onClick={() => switchMode("reset")}
class="flex items-center gap-1.5 text-gray-400 hover:text-purple-400 transition-colors"
>
<KeyRound class="w-4 h-4" />
<span>Reset Password</span>
</button>
</div>
</div>
</Show>
{/* Register Form */}
<Show when={mode() === "register"}>
<form onSubmit={handleRegister} class="space-y-5">
<div class="space-y-1.5">
<label class="text-xs font-semibold text-gray-500 uppercase tracking-wider ml-1">Choose Username</label>
<div class="relative group">
<div class="absolute inset-y-0 left-0 pl-4 flex items-center pointer-events-none">
<User class="w-5 h-5 text-gray-500 group-focus-within:text-green-500 transition-colors" />
</div>
<input
type="text"
placeholder="Enter username"
value={username()}
onInput={(e) => setUsername(e.currentTarget.value)}
class="block w-full pl-12 pr-4 py-3.5 bg-[#1a1a1a] border border-white/5 rounded-2xl text-white focus:outline-none focus:ring-2 focus:ring-green-500/50 transition-all"
/>
</div>
</div>
<div class="space-y-1.5">
<label class="text-xs font-semibold text-gray-500 uppercase tracking-wider ml-1">Choose Password</label>
<div class="relative group">
<div class="absolute inset-y-0 left-0 pl-4 flex items-center pointer-events-none">
<Lock class="w-5 h-5 text-gray-500 group-focus-within:text-green-500 transition-colors" />
</div>
<input
type="password"
placeholder="Enter password"
value={password()}
onInput={(e) => setPassword(e.currentTarget.value)}
class="block w-full pl-12 pr-4 py-3.5 bg-[#1a1a1a] border border-white/5 rounded-2xl text-white placeholder-gray-600 focus:outline-none focus:ring-2 focus:ring-green-500/50 transition-all font-mono"
/>
</div>
</div>
<div class="space-y-1.5">
<label class="text-xs font-semibold text-gray-500 uppercase tracking-wider ml-1">Confirm Password</label>
<div class="relative group">
<div class="absolute inset-y-0 left-0 pl-4 flex items-center pointer-events-none">
<Lock class="w-5 h-5 text-gray-500 group-focus-within:text-green-500 transition-colors" />
</div>
<input
type="password"
placeholder="Confirm password"
value={confirmPassword()}
onInput={(e) => setConfirmPassword(e.currentTarget.value)}
class="block w-full pl-12 pr-4 py-3.5 bg-[#1a1a1a] border border-white/5 rounded-2xl text-white placeholder-gray-600 focus:outline-none focus:ring-2 focus:ring-green-500/50 transition-all font-mono"
/>
</div>
</div>
<button
type="submit"
disabled={isLoading() || !username() || !password() || !confirmPassword()}
class="w-full flex items-center justify-center gap-3 py-3.5 bg-gradient-to-r from-green-600 via-emerald-600 to-teal-600 hover:from-green-500 hover:to-teal-500 text-white font-bold rounded-2xl shadow-xl transform active:scale-[0.98] transition-all disabled:opacity-50 disabled:cursor-not-allowed"
>
<Show when={isLoading()} fallback={<><UserPlus class="w-5 h-5" /><span>Create Identity</span></>}>
<div class="w-5 h-5 border-2 border-white/30 border-t-white rounded-full animate-spin" />
<span>Creating...</span>
</Show>
</button>
</form>
</Show>
{/* Reset Password Form */}
<Show when={mode() === "reset"}>
<form onSubmit={handleResetPassword} class="space-y-5">
<div class="space-y-1.5">
<label class="text-xs font-semibold text-gray-500 uppercase tracking-wider ml-1">Identity</label>
<div class="relative group">
<div class="absolute inset-y-0 left-0 pl-4 flex items-center pointer-events-none">
<User class="w-5 h-5 text-gray-500 group-focus-within:text-purple-500 transition-colors" />
</div>
<input
type="text"
placeholder="Username"
value={username()}
onInput={(e) => setUsername(e.currentTarget.value)}
class="block w-full pl-12 pr-4 py-3.5 bg-[#1a1a1a] border border-white/5 rounded-2xl text-white focus:outline-none focus:ring-2 focus:ring-purple-500/50 transition-all"
list="identity-suggestions-reset"
/>
<datalist id="identity-suggestions-reset">
<For each={users()}>{(user) => <option value={user.name} />}</For>
</datalist>
</div>
</div>
<div class="space-y-1.5">
<label class="text-xs font-semibold text-gray-500 uppercase tracking-wider ml-1">Current Password</label>
<div class="relative group">
<div class="absolute inset-y-0 left-0 pl-4 flex items-center pointer-events-none">
<Lock class="w-5 h-5 text-gray-500 group-focus-within:text-purple-500 transition-colors" />
</div>
<input
type="password"
placeholder="Enter current password"
value={password()}
onInput={(e) => setPassword(e.currentTarget.value)}
class="block w-full pl-12 pr-4 py-3.5 bg-[#1a1a1a] border border-white/5 rounded-2xl text-white placeholder-gray-600 focus:outline-none focus:ring-2 focus:ring-purple-500/50 transition-all font-mono"
/>
</div>
</div>
<div class="space-y-1.5">
<label class="text-xs font-semibold text-gray-500 uppercase tracking-wider ml-1">New Password</label>
<div class="relative group">
<div class="absolute inset-y-0 left-0 pl-4 flex items-center pointer-events-none">
<KeyRound class="w-5 h-5 text-gray-500 group-focus-within:text-purple-500 transition-colors" />
</div>
<input
type="password"
placeholder="Enter new password"
value={newPassword()}
onInput={(e) => setNewPassword(e.currentTarget.value)}
class="block w-full pl-12 pr-4 py-3.5 bg-[#1a1a1a] border border-white/5 rounded-2xl text-white placeholder-gray-600 focus:outline-none focus:ring-2 focus:ring-purple-500/50 transition-all font-mono"
/>
</div>
</div>
<button
type="submit"
disabled={isLoading() || !username() || !password() || !newPassword()}
class="w-full flex items-center justify-center gap-3 py-3.5 bg-gradient-to-r from-purple-600 via-violet-600 to-fuchsia-600 hover:from-purple-500 hover:to-fuchsia-500 text-white font-bold rounded-2xl shadow-xl transform active:scale-[0.98] transition-all disabled:opacity-50 disabled:cursor-not-allowed"
>
<Show when={isLoading()} fallback={<><KeyRound class="w-5 h-5" /><span>Reset Password</span></>}>
<div class="w-5 h-5 border-2 border-white/30 border-t-white rounded-full animate-spin" />
<span>Resetting...</span>
</Show>
</button>
</form>
</Show>
<div class="mt-6 text-center text-xs text-gray-600">
Powered by Antigravity OS v4.5 | Encrypted Connection
</div>
</div>
</div>
)
}
export default LoginView

View File

@@ -0,0 +1,320 @@
/**
* MINIMAL CHAT BYPASS
*
* This is a stripped-down chat component that:
* - Uses minimal store access (just for model/session info)
* - Makes direct fetch calls
* - Has NO complex effects/memos
* - Renders messages as a simple list
*
* Purpose: Test if the UI responsiveness issue is in the
* reactivity system or something else entirely.
*/
import { createSignal, For, Show, onMount } from "solid-js"
import { sessions } from "@/stores/session-state"
// One chat entry rendered by MinimalChat; `content` grows in place
// while the assistant reply streams.
interface Message {
  id: string // unique local id from generateId()
  role: "user" | "assistant"
  content: string // full accumulated text so far
  timestamp: number // Date.now() at creation
  status: "sending" | "streaming" | "complete" | "error"
}
// Identifies which instance/session record supplies the model id that
// MinimalChat reads once on mount.
interface MinimalChatProps {
  instanceId: string // key into the sessions() store
  sessionId: string // session whose model.modelId seeds currentModel
}
/**
 * Minimal chat bypass component: direct streaming fetch to
 * /api/ollama/chat with plain signal state and no store effects.
 * Exists to isolate UI-responsiveness problems from the reactivity
 * system. Interface unchanged: `instanceId` + `sessionId` props.
 */
export function MinimalChat(props: MinimalChatProps) {
  const [messages, setMessages] = createSignal<Message[]>([])
  const [inputText, setInputText] = createSignal("")
  const [isLoading, setIsLoading] = createSignal(false)
  const [error, setError] = createSignal<string | null>(null)
  const [currentModel, setCurrentModel] = createSignal("minimax-m1")
  let scrollContainer: HTMLDivElement | undefined
  let inputRef: HTMLTextAreaElement | undefined

  /** Locally unique id for message records (timestamp + random suffix). */
  function generateId() {
    return `msg_${Date.now()}_${Math.random().toString(36).slice(2, 9)}`
  }

  /** Pin the message list to its bottom edge. */
  function scrollToBottom() {
    if (scrollContainer) {
      scrollContainer.scrollTop = scrollContainer.scrollHeight
    }
  }

  // Get model from session on mount (one-time read, no reactive dependency)
  onMount(() => {
    try {
      const instanceSessions = sessions().get(props.instanceId)
      const session = instanceSessions?.get(props.sessionId)
      if (session?.model?.modelId) {
        setCurrentModel(session.model.modelId)
      }
    } catch (e) {
      console.warn("Could not get session model, using default", e)
    }
    inputRef?.focus()
  })

  /**
   * Send the current input and stream the assistant reply (SSE-style
   * "data:" lines) into a placeholder message.
   */
  async function sendMessage() {
    const text = inputText().trim()
    if (!text || isLoading()) return
    setError(null)
    setInputText("")
    setIsLoading(true)

    // BUG FIX: capture conversation history BEFORE appending the new
    // user turn. Solid signals update synchronously, so the original
    // code — which read messages() after setMessages() and then also
    // appended { role: "user", content: text } — sent the user message
    // to the model twice.
    const history = messages()
      .filter(m => m.status === "complete")
      .map(m => ({ role: m.role, content: m.content }))

    const userMessage: Message = {
      id: generateId(),
      role: "user",
      content: text,
      timestamp: Date.now(),
      status: "complete"
    }
    const assistantMessage: Message = {
      id: generateId(),
      role: "assistant",
      content: "",
      timestamp: Date.now(),
      status: "streaming"
    }
    // Add messages to state
    setMessages(prev => [...prev, userMessage, assistantMessage])
    scrollToBottom()
    try {
      // Direct fetch with streaming
      const response = await fetch("/api/ollama/chat", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          model: currentModel(),
          messages: [...history, { role: "user", content: text }],
          stream: true
        })
      })
      if (!response.ok) {
        throw new Error(`Request failed: ${response.status}`)
      }
      const reader = response.body?.getReader()
      if (!reader) throw new Error("No response body")
      const decoder = new TextDecoder()
      let fullContent = ""
      let buffer = ""
      while (true) {
        const { done, value } = await reader.read()
        if (done) break
        buffer += decoder.decode(value, { stream: true })
        // Keep the (possibly partial) last line in the buffer for the
        // next chunk; only complete lines are parsed below.
        const lines = buffer.split("\n")
        buffer = lines.pop() || ""
        for (const line of lines) {
          const trimmed = line.trim()
          if (!trimmed.startsWith("data:")) continue
          const data = trimmed.slice(5).trim()
          if (!data || data === "[DONE]") continue
          try {
            const chunk = JSON.parse(data)
            const delta = chunk?.message?.content
            if (typeof delta === "string" && delta.length > 0) {
              fullContent += delta
              // Update assistant message content (simple state update)
              setMessages(prev =>
                prev.map(m =>
                  m.id === assistantMessage.id
                    ? { ...m, content: fullContent }
                    : m
                )
              )
              scrollToBottom()
            }
          } catch {
            // Ignore parse errors
          }
        }
      }
      // Mark as complete
      setMessages(prev =>
        prev.map(m =>
          m.id === assistantMessage.id
            ? { ...m, status: "complete" }
            : m
        )
      )
    } catch (e) {
      const errorMsg = e instanceof Error ? e.message : "Unknown error"
      setError(errorMsg)
      // Mark as error
      setMessages(prev =>
        prev.map(m =>
          m.id === assistantMessage.id
            ? { ...m, status: "error", content: `Error: ${errorMsg}` }
            : m
        )
      )
    } finally {
      setIsLoading(false)
      scrollToBottom()
    }
  }

  /** Enter sends; Shift+Enter inserts a newline. */
  function handleKeyDown(e: KeyboardEvent) {
    if (e.key === "Enter" && !e.shiftKey) {
      e.preventDefault()
      sendMessage()
    }
  }

  return (
    <div style={{
      display: "flex",
      "flex-direction": "column",
      height: "100%",
      background: "#0a0a0b",
      color: "#e4e4e7"
    }}>
      {/* Header */}
      <div style={{
        padding: "16px",
        "border-bottom": "1px solid #27272a",
        background: "#18181b"
      }}>
        <h2 style={{ margin: 0, "font-size": "16px" }}>
          🧪 Minimal Chat (Bypass Mode)
        </h2>
        <p style={{ margin: "4px 0 0", "font-size": "12px", color: "#71717a" }}>
          Model: {currentModel()} | Testing UI responsiveness
        </p>
      </div>
      {/* Messages */}
      <div
        ref={scrollContainer}
        style={{
          flex: 1,
          overflow: "auto",
          padding: "16px"
        }}
      >
        <Show when={messages().length === 0}>
          <div style={{
            "text-align": "center",
            color: "#71717a",
            padding: "48px"
          }}>
            Send a message to test UI responsiveness
          </div>
        </Show>
        <For each={messages()}>
          {(message) => (
            <div style={{
              "margin-bottom": "16px",
              padding: "12px",
              background: message.role === "user" ? "#27272a" : "#18181b",
              "border-radius": "8px",
              "border-left": message.role === "assistant" ? "3px solid #6366f1" : "none"
            }}>
              <div style={{
                "font-size": "11px",
                color: "#71717a",
                "margin-bottom": "8px"
              }}>
                {message.role === "user" ? "You" : "Assistant"}
                {message.status === "streaming" && " (streaming...)"}
                {message.status === "error" && " (error)"}
              </div>
              <div style={{
                "white-space": "pre-wrap",
                "word-break": "break-word",
                "font-size": "14px",
                "line-height": "1.6"
              }}>
                {message.content || (message.status === "streaming" ? "▋" : "")}
              </div>
            </div>
          )}
        </For>
      </div>
      {/* Error display */}
      <Show when={error()}>
        <div style={{
          padding: "8px 16px",
          background: "#7f1d1d",
          color: "#fecaca",
          "font-size": "12px"
        }}>
          Error: {error()}
        </div>
      </Show>
      {/* Input area */}
      <div style={{
        padding: "16px",
        "border-top": "1px solid #27272a",
        background: "#18181b"
      }}>
        <div style={{ display: "flex", gap: "8px" }}>
          <textarea
            ref={inputRef}
            value={inputText()}
            onInput={(e) => setInputText(e.currentTarget.value)}
            onKeyDown={handleKeyDown}
            placeholder="Type a message... (Enter to send)"
            disabled={isLoading()}
            style={{
              flex: 1,
              padding: "12px",
              background: "#27272a",
              border: "1px solid #3f3f46",
              "border-radius": "8px",
              color: "#e4e4e7",
              resize: "none",
              "font-size": "14px",
              "min-height": "48px",
              "max-height": "150px"
            }}
            rows={1}
          />
          <button
            onClick={sendMessage}
            disabled={isLoading() || !inputText().trim()}
            style={{
              padding: "12px 24px",
              background: isLoading() ? "#3f3f46" : "#6366f1",
              color: "white",
              border: "none",
              "border-radius": "8px",
              cursor: isLoading() ? "wait" : "pointer",
              "font-weight": "600"
            }}
          >
            {isLoading() ? "..." : "Send"}
          </button>
        </div>
      </div>
    </div>
  )
}
export default MinimalChat

View File

@@ -1,7 +1,7 @@
import { createSignal, Show, onMount, For, createMemo, createEffect, onCleanup } from "solid-js";
import { createSignal, Show, onMount, For, createMemo, createEffect, onCleanup, untrack } from "solid-js";
import { sessions, withSession, setActiveSession } from "@/stores/session-state";
import { instances } from "@/stores/instances";
import { sendMessage, compactSession, updateSessionAgent, updateSessionModelForSession } from "@/stores/session-actions";
import { sendMessage, compactSession, updateSessionAgent, updateSessionModelForSession, forceReset } from "@/stores/session-actions";
import { addTask, setActiveTask, archiveTask } from "@/stores/task-actions";
import { messageStoreBus } from "@/stores/message-v2/bus";
import MessageBlockList, { getMessageAnchorId } from "@/components/message-block-list";
@@ -10,6 +10,7 @@ import { addToTaskQueue, getSoloState, setActiveTaskId, toggleAutonomous, toggle
import { getLogger } from "@/lib/logger";
import { clearCompactionSuggestion, getCompactionSuggestion } from "@/stores/session-compaction";
import { emitSessionSidebarRequest } from "@/lib/session-sidebar-events";
import { detectAgentWorkingState, getAgentStatusMessage } from "@/lib/agent-status-detection";
import {
Command,
Plus,
@@ -42,6 +43,7 @@ import {
} from "lucide-solid";
import ModelSelector from "@/components/model-selector";
import AgentSelector from "@/components/agent-selector";
import { DebugOverlay, setForceResetFn } from "@/components/debug-overlay";
import AttachmentChip from "@/components/attachment-chip";
import { createFileAttachment } from "@/types/attachment";
import type { InstanceMessageStore } from "@/stores/message-v2/instance-store";
@@ -96,18 +98,10 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
// Message store integration
const messageStore = () => messageStoreBus.getOrCreate(props.instanceId);
const lastAssistantIndex = () => {
const ids = filteredMessageIds();
const store = messageStore();
for (let i = ids.length - 1; i >= 0; i--) {
const msg = store.getMessage(ids[i]);
if (msg?.role === "assistant") return i;
}
return -1;
};
// Filter messages based on selected task - use store's session messages for the task session
const filteredMessageIds = () => {
// Must be defined before lastAssistantIndex which depends on it
const filteredMessageIds = createMemo(() => {
const task = selectedTask();
if (!task) return []; // Show no messages in Pipeline view
@@ -119,7 +113,27 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
// Fallback to task.messageIds for backward compatibility
return task.messageIds || [];
};
});
// Memoized to prevent recalculation on every render
const lastAssistantIndex = createMemo(() => {
const ids = filteredMessageIds();
if (ids.length === 0) return -1;
const store = messageStore();
// Only check last few messages to find the last assistant (optimization)
const startIndex = Math.max(0, ids.length - 5);
for (let i = ids.length - 1; i >= startIndex; i--) {
const msg = store.getMessage(ids[i]);
if (msg?.role === "assistant") return i;
}
// If not found in last 5, fall back to full scan (rare case)
for (let i = startIndex - 1; i >= 0; i--) {
const msg = store.getMessage(ids[i]);
if (msg?.role === "assistant") return i;
}
return -1;
});
// Note: Auto-scroll is handled in two places:
// 1. After sending a message (in handleSendMessage)
@@ -133,6 +147,7 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
});
const tokenStats = createMemo(() => {
return untrack(() => {
const usage = sessionUsage();
return {
used: usage?.actualUsageTokens ?? 0,
@@ -145,14 +160,9 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
cost: usage?.totalCost ?? 0,
};
});
// Get current model from active task session
const currentModel = createMemo(() => {
const instanceSessions = sessions().get(props.instanceId);
const session = instanceSessions?.get(activeTaskSessionId());
return session?.model?.modelId || "unknown";
});
// Get active task session ID (must be defined before memos that use it)
const activeTaskSessionId = createMemo(() => {
const task = selectedTask();
return task?.taskSessionId || props.sessionId;
@@ -163,6 +173,13 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
return instanceSessions?.get(activeTaskSessionId());
});
// Get current model from active task session
const currentModel = createMemo(() => {
const instanceSessions = sessions().get(props.instanceId);
const session = instanceSessions?.get(activeTaskSessionId());
return session?.model?.modelId || "unknown";
});
const currentTaskAgent = createMemo(() => activeTaskSession()?.agent || "");
const currentTaskModel = createMemo(() => activeTaskSession()?.model || { providerId: "", modelId: "" });
@@ -195,55 +212,101 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
// Show thinking while we're actively sending
if (isSending()) return true;
const store = messageStore();
// Check for streaming in the specific task session
const taskSessionId = activeTaskSessionId();
const sessionRecord = store.state.sessions[taskSessionId];
const sessionMessages = sessionRecord ? sessionRecord.messageIds : [];
const isAnyStreaming = sessionMessages.some((id: string) => {
const m = store.getMessage(id);
return m?.role === "assistant" && (m.status === "streaming" || m.status === "sending");
});
if (isAnyStreaming) return true;
// Also check the filtered message IDs (for tasks)
const ids = filteredMessageIds();
if (ids.length === 0) return false;
const store = messageStore();
const lastMsg = store.getMessage(ids[ids.length - 1]);
return lastMsg?.role === "assistant" && (lastMsg.status === "streaming" || lastMsg.status === "sending");
});
// Auto-scroll during streaming - must be after isAgentThinking is defined
createEffect(() => {
const streaming = isAgentThinking();
if (!streaming) return;
// During streaming, scroll periodically to keep up with content (unless user is scrolling)
const interval = setInterval(() => {
if (!userScrolling()) {
scrollToBottom();
// Basic check: streaming or sending status
if (lastMsg?.role === "assistant" && (lastMsg.status === "streaming" || lastMsg.status === "sending")) {
return true;
}
}, 300);
return () => clearInterval(interval);
// Enhanced check: semantic detection for "standby", "processing" messages
// This catches Ollama models that output status messages and pause
if (lastMsg?.role === "assistant") {
const workingState = detectAgentWorkingState(lastMsg);
return workingState.isWorking;
}
return false;
});
// Auto-scroll when new messages arrive
// Get dynamic status message for display
const agentStatusMessage = createMemo(() => {
const ids = filteredMessageIds();
if (ids.length === 0) return "THINKING";
const store = messageStore();
const lastMsg = store.getMessage(ids[ids.length - 1]);
if (!lastMsg || lastMsg.role !== "assistant") {
return "THINKING";
}
const statusMsg = getAgentStatusMessage(lastMsg);
return statusMsg?.toUpperCase() || "THINKING";
});
// Auto-scroll during streaming - DISABLED for performance testing
// createEffect(() => {
// const streaming = isAgentThinking();
// if (!streaming) return;
//
// let lastScrollTime = 0;
// const scrollThrottled = () => {
// const now = Date.now();
// if (now - lastScrollTime > 500) {
// lastScrollTime = now;
// if (!userScrolling()) {
// scrollToBottom();
// }
// }
// };
//
// const interval = setInterval(() => {
// if (!userScrolling()) {
// requestAnimationFrame(scrollToBottom);
// }
// }, 200);
// return () => clearInterval(interval);
// });
// Auto-scroll when new messages arrive (throttled to count changes only)
let lastScrolledCount = 0;
let scrollTimeoutId: ReturnType<typeof setTimeout> | undefined;
createEffect(() => {
const ids = filteredMessageIds();
const thinking = isAgentThinking();
const count = ids.length;
// Scroll when message count changes or when thinking starts (unless user is scrolling)
if ((ids.length > 0 || thinking) && !userScrolling()) {
// Only scroll when message COUNT changes, not on every store update
// This prevents the effect from firing on every streaming chunk
// Note: Streaming scrolling is handled by the interval in the isAgentThinking effect above
if (count !== lastScrolledCount && count > 0 && !userScrolling()) {
lastScrolledCount = count;
// Clear any existing timeout to prevent timer accumulation
if (scrollTimeoutId) {
clearTimeout(scrollTimeoutId);
}
// Use requestAnimationFrame for smoother scrolling without locking specific frames
requestAnimationFrame(() => {
setTimeout(scrollToBottom, 50);
scrollToBottom();
});
}
});
// Scroll event listener to detect user scrolling
onMount(() => {
// Wire up debug overlay reset function (must be inside onMount to avoid SolidJS errors)
setForceResetFn(() => {
forceReset();
setIsSending(false);
});
const handleScroll = () => {
if (scrollContainer) {
const isScrollingUp = scrollContainer.scrollTop < lastScrollTop();
@@ -264,40 +327,54 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
container?.addEventListener('scroll', handleScroll, { passive: true });
return () => {
container?.removeEventListener('scroll', handleScroll);
// Enhanced cleanup: remove scroll listener and clear any pending timeouts
if (container) {
container.removeEventListener('scroll', handleScroll);
}
// Clear any pending scroll timeout
if (scrollTimeoutId) {
clearTimeout(scrollTimeoutId);
}
};
});
// Additional cleanup on component unmount
onCleanup(() => {
if (scrollTimeoutId) {
clearTimeout(scrollTimeoutId);
}
});
const handleSendMessage = async () => {
const message = chatInput().trim();
if (!message || isSending()) return;
setIsSending(true);
log.info("handleSendMessage started", {
instanceId: props.instanceId,
sessionId: props.sessionId,
selectedTaskId: selectedTaskId(),
messageLength: message.length
});
// Yield to let UI update "Is Sending" state before heavy operations
await new Promise(resolve => setTimeout(resolve, 0));
// Reduced logging to prevent IPC overload
// log.info("handleSendMessage started", { ... });
try {
let taskId = selectedTaskId();
let targetSessionId = props.sessionId;
// If no task selected, create one automatically
if (!taskId) {
log.info("No task selected, creating new task");
// log.info("No task selected, creating new task");
const title = message.length > 30 ? message.substring(0, 27) + "..." : message;
const result = await addTask(props.instanceId, props.sessionId, title);
taskId = result.id;
targetSessionId = result.taskSessionId || props.sessionId;
log.info("New task created", { taskId, targetSessionId });
// log.info("New task created", { taskId, targetSessionId });
setSelectedTaskId(taskId);
// If autonomous mode is on, we might want to queue it or set it as active
const s = solo();
if (s.isAutonomous) {
log.info("Autonomous mode active, setting active task or queuing");
// log.info("Autonomous mode active, setting active task or queuing");
if (!s.activeTaskId) {
setActiveTaskId(props.instanceId, taskId);
} else {
@@ -309,14 +386,14 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
targetSessionId = task?.taskSessionId || props.sessionId;
}
log.info("Target session identified", { targetSessionId, taskId });
// log.info("Target session identified", { targetSessionId, taskId });
const store = messageStore();
log.info("Message store check before sending", {
/* log.info("Message store check before sending", {
instanceId: props.instanceId,
storeExists: !!store,
messageCount: store?.getSessionMessageIds(targetSessionId).length
});
}); */
await sendMessage(
props.instanceId,
@@ -326,18 +403,18 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
taskId || undefined
);
log.info("sendMessage call completed");
// log.info("sendMessage call completed");
setChatInput("");
setAttachments([]);
// Auto-scroll to bottom after sending
setTimeout(scrollToBottom, 100);
} catch (error) {
log.error("handleSendMessage failed", error);
// log.error("handleSendMessage failed", error);
console.error("[MultiTaskChat] Send failed:", error);
} finally {
setIsSending(false);
log.info("handleSendMessage finished");
// log.info("handleSendMessage finished");
}
};
@@ -365,7 +442,15 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
};
// Stop/cancel the current agent operation
const handleStopAgent = async () => {
const handleStopAgent = async (e?: MouseEvent) => {
// Check for Force Reset (Shift + Click)
if (e?.shiftKey) {
log.warn("Shift+Click detected on Stop Agent - Triggering Force Reset");
forceReset();
setIsSending(false);
return;
}
const task = selectedTask();
if (!task) return;
@@ -441,6 +526,7 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
return (
<main class="absolute inset-0 flex flex-col bg-[#0a0a0b] text-zinc-300 font-sans selection:bg-indigo-500/30 overflow-hidden">
<DebugOverlay />
{/* Header */}
<header class="h-14 px-4 flex items-center justify-between bg-zinc-900/60 backdrop-blur-xl border-b border-white/5 relative z-30 shrink-0">
<div class="flex items-center space-x-3">
@@ -483,7 +569,7 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
<Show when={isAgentThinking()}>
<div class="flex items-center space-x-2 px-3 py-1.5 bg-violet-500/15 border border-violet-500/30 rounded-lg animate-pulse shadow-[0_0_20px_rgba(139,92,246,0.2)]">
<Sparkles size={12} class="text-violet-400 animate-spin" style={{ "animation-duration": "3s" }} />
<span class="text-[10px] font-black text-violet-400 uppercase tracking-tight">Streaming</span>
<span class="text-[10px] font-black text-violet-400 uppercase tracking-tight">{agentStatusMessage()}</span>
<span class="text-[10px] font-bold text-violet-300">{formatTokenTotal(tokenStats().used)}</span>
</div>
</Show>
@@ -790,7 +876,7 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
<div class="w-1 h-1 bg-indigo-400 rounded-full animate-bounce" style={{ "animation-delay": "150ms" }} />
<div class="w-1 h-1 bg-indigo-400 rounded-full animate-bounce" style={{ "animation-delay": "300ms" }} />
</div>
<span class="text-[9px] font-bold text-indigo-400">{isAgentThinking() ? "THINKING" : "SENDING"}</span>
<span class="text-[9px] font-bold text-indigo-400">{isSending() ? "SENDING" : agentStatusMessage()}</span>
</div>
</Show>
</div>
@@ -869,7 +955,7 @@ export default function MultiTaskChat(props: MultiTaskChatProps) {
<button
onClick={handleStopAgent}
class="px-3 py-1.5 bg-rose-500/20 hover:bg-rose-500/30 text-rose-300 rounded-lg text-[10px] font-bold uppercase tracking-wide transition-all border border-rose-500/30"
title="Stop response"
title="Stop response (Shift+Click to Force Reset UI)"
>
<StopCircle size={12} class="inline-block mr-1" />
Stop

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,101 @@
/**
* SimpleMessageBlock - Polling-based message renderer
*
* Updates content via interval, not reactive cascade.
* This prevents the freeze during streaming.
*/
import { createSignal, Show, onMount, onCleanup } from "solid-js";
import type { InstanceMessageStore } from "@/stores/message-v2/instance-store";
/** Props for {@link SimpleMessageBlock}. */
interface SimpleMessageBlockProps {
  /** Id of the message to render; looked up in the store on every poll tick. */
  messageId: string;
  /** Accessor returning the per-instance message store (called fresh each poll). */
  store: () => InstanceMessageStore;
}
export function SimpleMessageBlock(props: SimpleMessageBlockProps) {
  // Local snapshot of the message, refreshed by a timer instead of by a
  // reactive subscription — this is the whole point of the component: no
  // cascading re-renders while the assistant streams tokens.
  const [content, setContent] = createSignal("");
  const [isStreaming, setIsStreaming] = createSignal(false);
  const [isUser, setIsUser] = createSignal(false);
  const [timestamp, setTimestamp] = createSignal("");
  const [tokenCount, setTokenCount] = createSignal(0);

  /** Copy the latest message state out of the store into the local signals. */
  function updateFromStore() {
    const message = props.store().getMessage(props.messageId);
    if (!message) return;

    setIsUser(message.role === "user");
    setIsStreaming(message.status === "streaming" || message.status === "sending");

    // Display body = the first "text" part, if any.
    const partRecords = Object.values(message.parts || {});
    const textPart: any = partRecords.find((p: any) => p?.data?.type === "text");
    let text: string = textPart ? (textPart.data.text || "") : "";

    // Fallback for stores that keep a flat `content` field instead of parts.
    if (!text && (message as any).content) {
      text = (message as any).content;
    }

    setContent(text);
    // Rough token estimate: ~4 characters per token.
    setTokenCount(Math.ceil(text.length / 4));
    // Note: MessageRecord doesn't have time property, so `timestamp` stays "".
  }

  onMount(() => {
    updateFromStore();
    // Re-poll every 100ms while the message is in flight. The `isStreaming()`
    // term lets one final pass run right after completion so the last chunk
    // of content is captured.
    const interval = setInterval(() => {
      const msg = props.store().getMessage(props.messageId);
      const inFlight = msg?.status === "streaming" || msg?.status === "sending";
      if (inFlight || isStreaming()) {
        updateFromStore();
      }
    }, 100);
    onCleanup(() => clearInterval(interval));
  });

  const bubbleClass = () =>
    isUser()
      ? "bg-zinc-800/50 border border-zinc-700/50"
      : "bg-zinc-900/50 border border-indigo-500/20";

  return (
    <div
      id={`message-anchor-${props.messageId}`}
      class={`rounded-xl p-4 transition-all min-w-0 overflow-hidden ${bubbleClass()}`}
    >
      <div class="flex items-center justify-between mb-2">
        <div class="flex items-center gap-2">
          <div class={`text-[10px] font-bold uppercase tracking-wide ${isUser() ? "text-indigo-400" : "text-emerald-400"}`}>
            {isUser() ? "You" : "Assistant"}
          </div>
          <Show when={isStreaming()}>
            <div class="flex items-center gap-2">
              <div class="flex items-center gap-1 text-[9px] text-violet-400">
                <div class="w-1.5 h-1.5 bg-violet-400 rounded-full animate-pulse" />
                <span>Thinking...</span>
              </div>
              <span class="text-[9px] font-mono text-zinc-500 bg-zinc-800/50 px-1 rounded">
                {tokenCount()} tks
              </span>
            </div>
          </Show>
        </div>
        <div class="text-[9px] text-zinc-600">{timestamp()}</div>
      </div>
      <div
        class="text-sm text-zinc-100 leading-relaxed whitespace-pre-wrap break-words overflow-hidden"
        style={{ "word-break": "break-word", "overflow-wrap": "anywhere" }}
      >
        {content() || (isStreaming() ? "▋" : "")}
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,8 @@
// Re-export all MultiX v2 components
// Barrel module: exposes the MultiX v2 root component plus its core and
// feature pieces under a single import path.
export { default as MultiXV2 } from "./index";
export { SimpleMessageBlock } from "./core/SimpleMessageBlock";
export { PipelineView } from "./features/PipelineView";
export { MessageNavSidebar } from "./features/MessageNavSidebar";
export { LiteAgentSelector } from "./features/LiteAgentSelector";
export { LiteModelSelector } from "./features/LiteModelSelector";
export { enhancePrompt, getQuickTips } from "./features/PromptEnhancer";

View File

@@ -0,0 +1,637 @@
/**
* LiteAgentSelector - Non-reactive agent selector for MultiX v2
*
* Uses polling instead of reactive subscriptions to prevent cascading updates.
* Includes AI Agent Generator feature.
*/
import { createSignal, For, onMount, onCleanup, Show } from "solid-js";
import { agents, setAgents, providers } from "@/stores/session-state";
import { fetchAgents } from "@/stores/session-api";
import { updateInstanceConfig } from "@/stores/instance-config";
import { toast } from "solid-toast";
import { ChevronDown, Bot, Plus, Sparkles, Loader2, Save, X, RefreshCw } from "lucide-solid";
import { serverApi } from "@/lib/api-client";
/** Props for {@link LiteAgentSelector}. */
interface LiteAgentSelectorProps {
  /** Workspace/instance whose agents and providers are listed. */
  instanceId: string;
  /** Current chat session id (passed through; not read directly here). */
  sessionId: string;
  /** Name of the currently selected agent (shown on the trigger button). */
  currentAgent: string;
  /** Called with the agent name when the user picks or saves an agent. */
  onAgentChange: (agent: string) => void;
}
/** Minimal agent descriptor used by the dropdown list and the generator preview. */
interface AgentInfo {
  name: string;
  description?: string;
  systemPrompt?: string;
}
export function LiteAgentSelector(props: LiteAgentSelectorProps) {
  // Dropdown / generator UI state. All plain local signals — this component
  // deliberately snapshots global stores by polling instead of subscribing
  // reactively (see file header).
  const [isOpen, setIsOpen] = createSignal(false);
  const [agentList, setAgentList] = createSignal<AgentInfo[]>([]);
  const [isGenerating, setIsGenerating] = createSignal(false);
  const [showGenerator, setShowGenerator] = createSignal(false);
  const [generatorInput, setGeneratorInput] = createSignal("");
  const [generatedAgent, setGeneratedAgent] = createSignal<AgentInfo | null>(null);
  const [isSaving, setIsSaving] = createSignal(false);
  const [selectedModel, setSelectedModel] = createSignal("glm-4");
  const [availableModels, setAvailableModels] = createSignal<{ id: string, name: string, provider: string }[]>([]);

  /**
   * Snapshot this instance's agents from the global `agents` store into the
   * local `agentList` signal, excluding subagents.
   */
  function loadAgents() {
    try {
      const instanceAgents = agents().get(props.instanceId) || [];
      const nonSubagents = instanceAgents.filter((a: any) => a.mode !== "subagent");
      setAgentList(nonSubagents.map((a: any) => ({
        name: a.name,
        description: a.description,
        systemPrompt: a.systemPrompt
      })));
    } catch (e) {
      console.warn("Failed to load agents", e);
    }
  }

  onMount(() => {
    loadAgents();
    // Populate available models from the providers store.
    const allProviders = providers().get(props.instanceId) || [];
    const models: { id: string, name: string, provider: string }[] = [];
    allProviders.forEach(p => {
      p.models.forEach(m => {
        models.push({ id: m.id, name: m.name || m.id, provider: p.id });
      });
    });
    // Add defaults if none found so the generator is still usable.
    if (models.length === 0) {
      models.push({ id: "glm-4", name: "GLM-4 (Z.AI)", provider: "zai" });
      models.push({ id: "qwen-coder-plus-latest", name: "Qwen Coder Plus (Zen)", provider: "opencode-zen" });
      models.push({ id: "minimax-m1", name: "MiniMax M1 (Ollama)", provider: "ollama" });
    }
    setAvailableModels(models);
    // Poll every 5 seconds (agents don't change often).
    const interval = setInterval(loadAgents, 5000);
    onCleanup(() => clearInterval(interval));
  });

  /** Select an existing agent and close the dropdown. */
  const handleSelect = (agentName: string) => {
    props.onAgentChange(agentName);
    setIsOpen(false);
  };

  /**
   * Generate an agent definition via an AI chat endpoint.
   * Tries the user-selected model first, then a fixed list of fallback
   * models, and finally a deterministic local fallback so the flow never
   * dead-ends.
   */
  const handleGenerateAgent = async () => {
    const input = generatorInput().trim();
    if (!input || isGenerating()) return;
    setIsGenerating(true);
    const modelInfo = availableModels().find(m => m.id === selectedModel());
    // Normalize provider ID - handle variants like "ollama-cloud" -> "ollama"
    let provider = modelInfo?.provider || "zai";
    if (provider.includes("ollama")) provider = "ollama";
    if (provider.includes("zen")) provider = "opencode-zen";
    console.log(`[AgentGenerator] Using provider: ${provider}, model: ${selectedModel()}`);
    // AI generation prompt - focused on unique, creative output
    const generationPrompt = `Create a unique AI coding assistant agent based on: "${input}"
RULES:
1. NAME: Create a catchy, memorable 1-3 word name (e.g., "Neon Architect", "Logic Ghost", "Cortex", "Syntax Specter"). BE CREATIVE!
2. DESCRIPTION: One powerful sentence about their unique paradigm or specialty.
3. SYSTEM PROMPT: Write a 400+ word deep-dive into their psyche, expertise, and operational style.
   - DO NOT be generic.
   - Give them a clear VOICE and philosophy.
   - Professional, yet distinct.
   - Mention specific methodologies they favor.
   - Explain how they view the relationship between code and problem-solving.
IMPORTANT: Return ONLY valid JSON in this format:
{"name": "...", "description": "...", "systemPrompt": "..."}`;

    // Provider id -> server-side chat proxy route.
    const endpoints: Record<string, string> = {
      "zai": "/api/zai/chat",
      "opencode-zen": "/api/opencode-zen/chat",
      "ollama": "/api/ollama/chat"
    };

    // Timeout wrapper for fetch with 60 second limit (AbortController).
    const fetchWithTimeout = async (url: string, options: RequestInit, timeoutMs: number = 60000) => {
      const controller = new AbortController();
      const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
      try {
        const response = await fetch(url, { ...options, signal: controller.signal });
        clearTimeout(timeoutId);
        return response;
      } catch (e) {
        clearTimeout(timeoutId);
        throw e;
      }
    };

    /**
     * Attempt one provider/model pair. Returns a parsed agent on success,
     * null on any failure (bad status, unparsable JSON, timeout).
     */
    const tryEndpoint = async (prov: string, model: string) => {
      try {
        console.log(`[AgentGenerator] Attempting generation with ${prov}/${model}...`);
        // Guard first: don't build a URL from an unknown provider.
        if (!endpoints[prov]) {
          console.warn(`[AgentGenerator] No endpoint configured for provider: ${prov}`);
          return null;
        }
        // Use absolute URL from serverApi to avoid port issues
        const baseUrl = serverApi.getApiBase();
        const endpoint = `${baseUrl}${endpoints[prov]}`;
        const response = await fetchWithTimeout(endpoint, {
          method: "POST",
          headers: { "Content-Type": "application/json" },
          body: JSON.stringify({
            model: model,
            messages: [{ role: "user", content: generationPrompt }],
            stream: false
          })
        }, 60000); // 60 second timeout
        if (response.ok) {
          const data = await response.json();
          // OpenAI-style providers nest the text under choices[0]; Ollama
          // returns message.content directly.
          const content = prov === "zai" || prov === "opencode-zen"
            ? (data?.choices?.[0]?.message?.content || data?.message?.content || "")
            : (data?.message?.content || "");
          console.log(`[AgentGenerator] Received content from ${prov}:`, content.substring(0, 100) + "...");
          const result = tryParseAgentJson(content, input);
          if (result) return result;
          console.warn(`[AgentGenerator] Failed to parse JSON from ${prov} response`);
        } else {
          const errText = await response.text();
          console.error(`[AgentGenerator] Endpoint ${prov} returned ${response.status}:`, errText);
        }
      } catch (e: any) {
        if (e.name === 'AbortError') {
          console.warn(`[AgentGenerator] Request to ${prov} timed out after 60s`);
          toast.error(`Generation timed out. Try a faster model.`, { duration: 5000 });
        } else {
          console.warn(`[AgentGenerator] Endpoint ${prov} failed:`, e);
        }
      }
      return null;
    };

    // 1. Try selected model
    let parsed = await tryEndpoint(provider, selectedModel());
    // 2. Fallbacks if selected fails - try faster models
    if (!parsed) {
      console.log("[AgentGenerator] Selected model failed, trying fallbacks...");
      const fallbacks = [
        { prov: "ollama", model: "qwen3:8b" },
        { prov: "opencode-zen", model: "qwen-coder-plus-latest" },
        { prov: "zai", model: "glm-4" },
      ].filter(f => f.model !== selectedModel());
      for (const f of fallbacks) {
        parsed = await tryEndpoint(f.prov, f.model);
        if (parsed) break;
      }
    }
    if (parsed) {
      setGeneratedAgent(parsed);
      toast.success("Agent generated!", { icon: "🎉", duration: 3000 });
    } else {
      // 3. Last resort: deterministic local template keyed off the input.
      console.warn("[AgentGenerator] All AI endpoints failed, using smart fallback");
      setGeneratedAgent(generateSmartFallback(input));
      toast.success("Agent created (local fallback)", { duration: 3000 });
    }
    setIsGenerating(false);
  };

  /**
   * Try to parse an agent JSON object out of an AI response. Extracts the
   * first {...} span and requires a name plus a substantial (>100 char)
   * system prompt; returns null on any failure.
   */
  const tryParseAgentJson = (content: string, input: string): { name: string; description: string; systemPrompt: string } | null => {
    try {
      const jsonMatch = content.match(/\{[\s\S]*\}/);
      if (jsonMatch) {
        const parsed = JSON.parse(jsonMatch[0]);
        if (parsed.name && parsed.systemPrompt && parsed.systemPrompt.length > 100) {
          return {
            name: parsed.name,
            description: parsed.description || input,
            systemPrompt: parsed.systemPrompt
          };
        }
      }
    } catch (e) {
      console.error("JSON parse error:", e);
    }
    return null;
  };

  /**
   * Generate a local-fallback agent that still feels tailored: keyword
   * heuristics on the user's description pick a specialty/stack, which are
   * spliced into a long system-prompt template.
   */
  const generateSmartFallback = (input: string): { name: string; description: string; systemPrompt: string } => {
    const name = generateFallbackName(input);
    const timestamp = Date.now();
    // Create unique content based on input analysis
    const inputLower = input.toLowerCase();
    const isFrontend = /react|vue|angular|css|html|ui|frontend|web/.test(inputLower);
    const isBackend = /api|server|node|python|database|backend/.test(inputLower);
    const isFullStack = /full.?stack|complete|everything/.test(inputLower);
    const isAI = /ai|ml|machine|learning|neural|gpt|claude|llm/.test(inputLower);
    const isDevOps = /devops|docker|kubernetes|ci|cd|deploy/.test(inputLower);
    let specialty = "general software development";
    let techStack = "JavaScript, TypeScript, Python";
    let uniqueTrait = "methodical approach to problem-solving";
    // First matching category wins (frontend > backend > fullstack > AI > devops).
    if (isFrontend) {
      specialty = "frontend architecture and user experience";
      techStack = "React, Vue, TypeScript, CSS, Tailwind";
      uniqueTrait = "pixel-perfect attention to detail and smooth animations";
    } else if (isBackend) {
      specialty = "backend systems and API design";
      techStack = "Node.js, Python, PostgreSQL, Redis, GraphQL";
      uniqueTrait = "building scalable, fault-tolerant services";
    } else if (isFullStack) {
      specialty = "end-to-end application development";
      techStack = "React, Node.js, PostgreSQL, Docker, AWS";
      uniqueTrait = "seamless integration between frontend and backend";
    } else if (isAI) {
      specialty = "AI/ML integration and prompt engineering";
      techStack = "Python, LangChain, OpenAI, HuggingFace, Vector DBs";
      uniqueTrait = "crafting intelligent, context-aware AI solutions";
    } else if (isDevOps) {
      specialty = "infrastructure and deployment automation";
      techStack = "Docker, Kubernetes, Terraform, GitHub Actions, AWS";
      uniqueTrait = "zero-downtime deployments and infrastructure as code";
    }
    return {
      name,
      description: `Expert in ${specialty} with ${uniqueTrait}`,
      systemPrompt: `You are ${name}, a senior software engineer with 10+ years of expertise in ${specialty}.
## Your Personality
You are confident but humble, always explaining your reasoning clearly. You prefer elegant, maintainable solutions over clever hacks. When you don't know something, you say so honestly and suggest ways to find the answer.
## Technical Expertise
Your primary stack: ${techStack}
Your specialty: ${specialty}
Your unique strength: ${uniqueTrait}
## How You Work
1. **Understand First**: Before writing code, you analyze the existing codebase structure, patterns, and conventions
2. **Plan Carefully**: You outline your approach before implementing, considering edge cases and potential issues
3. **Code Quality**: Every line you write follows best practices - clean naming, proper error handling, comprehensive types
4. **Test Thinking**: You consider how code will be tested, even if tests aren't explicitly requested
5. **Documentation**: You add meaningful comments for complex logic, not obvious operations
## Code Standards You Follow
- Use descriptive variable and function names that reveal intent
- Keep functions small and focused (single responsibility)
- Handle errors gracefully with informative messages
- Prefer composition over inheritance
- Write self-documenting code, supplement with comments only where needed
- Always consider performance implications
## Communication Style
- Be direct and actionable in your responses
- When suggesting changes, explain WHY not just WHAT
- If multiple approaches exist, briefly mention pros/cons
- Celebrate good code when you see it
- Provide constructive feedback on improvements
## Tool Usage
- Use read_file to understand existing code before modifying
- Use list_files to understand project structure
- Use write_file to create or update files with complete, working code
- Always verify syntax correctness before submitting
Built for: ${input}
Session ID: ${timestamp}`
    };
  };

  /**
   * Generate a professional fallback name from user input: match a known
   * tech keyword first, otherwise title-case the first two words.
   */
  const generateFallbackName = (input: string): string => {
    // Extract key words and create a professional sounding name
    const words = input.toLowerCase().split(/\s+/).filter(w => w.length > 2);
    // Common tech keywords to look for
    const keywords: Record<string, string> = {
      'typescript': 'TypeScript Pro',
      'javascript': 'JS Expert',
      'react': 'React Master',
      'python': 'Python Guru',
      'api': 'API Architect',
      'code': 'Code Expert',
      'full': 'Full Stack Pro',
      'frontend': 'Frontend Master',
      'backend': 'Backend Pro',
      'mcp': 'MCP Specialist',
      'agent': 'Smart Agent',
      'thinking': 'Deep Thinker',
      'claude': 'AI Assistant',
      'smart': 'Smart Coder',
      'fix': 'Bug Hunter',
      'test': 'Test Master',
      'debug': 'Debug Pro',
      'architect': 'Code Architect',
      'review': 'Code Reviewer'
    };
    // Try to find a matching keyword
    for (const word of words) {
      for (const [key, name] of Object.entries(keywords)) {
        if (word.includes(key)) {
          return name;
        }
      }
    }
    // Default: Create from first few words
    const titleWords = words.slice(0, 2).map(w =>
      w.charAt(0).toUpperCase() + w.slice(1)
    );
    return titleWords.length > 0 ? titleWords.join(' ') + ' Pro' : 'Custom Agent';
  }

  // NOTE(review): an unused `generateFallbackPrompt` helper was removed here —
  // it was never referenced by any code path (generateSmartFallback is the
  // actual fallback used).

  /**
   * Persist the generated agent to the backend, mirror it into the local
   * instance config (source of truth for fetchAgents), refresh the list,
   * and activate it. Reports progress through a single loading toast.
   */
  const handleSaveAgent = async () => {
    const agent = generatedAgent();
    if (!agent || isSaving()) return;
    setIsSaving(true);
    const toastId = toast.loading("Saving agent...");
    try {
      // Save to backend
      // NOTE(review): this uses a relative URL while generation uses
      // serverApi.getApiBase() — confirm both are meant to hit the same server.
      const response = await fetch(`/api/workspaces/${props.instanceId}/agents`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
          name: agent.name,
          description: agent.description,
          systemPrompt: agent.systemPrompt,
          mode: "agent"
        })
      });
      if (response.ok) {
        // CRITICAL: Update local instance config to keep it in sync with backend
        // This is the source of truth that fetchAgents() reads from
        await updateInstanceConfig(props.instanceId, (draft) => {
          if (!draft.customAgents) {
            draft.customAgents = [];
          }
          const existingIndex = draft.customAgents.findIndex(a => a.name === agent.name);
          const agentData = {
            name: agent.name,
            description: agent.description || "",
            prompt: agent.systemPrompt || ""
          };
          if (existingIndex >= 0) {
            draft.customAgents[existingIndex] = agentData;
          } else {
            draft.customAgents.push(agentData);
          }
        });
        // Fetch fresh agents from backend to update global signals
        await fetchAgents(props.instanceId);
        // Refresh local agent list
        loadAgents();
        // Manual update to ensure immediate feedback (fix for list lag)
        setAgentList(prev => {
          if (prev.some(a => a.name === agent.name)) return prev;
          return [...prev, { name: agent.name, description: agent.description, systemPrompt: agent.systemPrompt }];
        });
        // Select the new agent
        props.onAgentChange(agent.name);
        toast.success(`Agent "${agent.name}" saved and activated!`, { id: toastId });
        // Close generator
        setShowGenerator(false);
        setGeneratedAgent(null);
        setGeneratorInput("");
        setIsOpen(false);
      } else {
        const errorData = await response.json().catch(() => ({}));
        console.error("Failed to save agent:", response.status, errorData);
        toast.error(`Failed to save agent: ${errorData.error || response.statusText}`, { id: toastId });
      }
    } catch (error) {
      console.error("Failed to save agent:", error);
      toast.error("Network error while saving agent", { id: toastId });
    } finally {
      setIsSaving(false);
    }
  };

  return (
    <div class="relative">
      <button
        onClick={() => setIsOpen(!isOpen())}
        class="flex items-center justify-between w-full px-3 py-2 bg-zinc-900/60 border border-white/10 rounded-lg text-left hover:border-indigo-500/30 transition-all"
      >
        <div class="flex items-center gap-2">
          <Bot size={14} class="text-indigo-400" />
          <span class="text-[11px] font-bold text-zinc-200 truncate">
            {props.currentAgent || "Select Agent"}
          </span>
        </div>
        <ChevronDown size={12} class={`text-zinc-500 transition-transform ${isOpen() ? "rotate-180" : ""}`} />
      </button>
      <Show when={isOpen()}>
        <div class="absolute top-full left-0 right-0 mt-1 bg-zinc-900 border border-white/10 rounded-lg shadow-xl z-50 max-h-[80vh] overflow-y-auto">
          {/* Agent Generator Toggle */}
          <button
            onClick={() => setShowGenerator(!showGenerator())}
            class="w-full px-3 py-2 text-left hover:bg-indigo-500/10 transition-colors flex items-center gap-2 border-b border-white/5 text-indigo-400"
          >
            <Sparkles size={12} />
            <span class="text-[11px] font-bold">AI Agent Generator</span>
            <Plus size={12} class="ml-auto" />
          </button>
          {/* Generator Panel */}
          <Show when={showGenerator()}>
            <div class="p-3 border-b border-white/10 bg-zinc-950/50 space-y-3">
              <div class="space-y-1">
                <div class="text-[10px] text-zinc-400 font-medium">Generation Model:</div>
                <select
                  value={selectedModel()}
                  onChange={(e) => setSelectedModel(e.currentTarget.value)}
                  class="w-full bg-zinc-800 border border-white/10 rounded px-2 py-1.5 text-[10px] text-zinc-200 outline-none focus:border-indigo-500/50"
                >
                  <For each={availableModels()}>
                    {(model) => (
                      <option value={model.id}>{model.name}</option>
                    )}
                  </For>
                </select>
              </div>
              <div class="text-[10px] text-zinc-400 font-medium">
                Describe the agent you want to create:
              </div>
              <textarea
                value={generatorInput()}
                onInput={(e) => setGeneratorInput(e.currentTarget.value)}
                placeholder="e.g., A TypeScript expert who focuses on clean code and best practices..."
                class="w-full bg-zinc-800 border border-white/10 rounded-lg px-3 py-2 text-[11px] text-zinc-200 placeholder-zinc-600 resize-none outline-none focus:border-indigo-500/50"
                rows={3}
              />
              <div class="flex items-center gap-2">
                <button
                  onClick={handleGenerateAgent}
                  disabled={!generatorInput().trim() || isGenerating()}
                  class="flex-1 px-3 py-1.5 bg-indigo-500/20 border border-indigo-500/40 rounded-lg text-[10px] font-bold text-indigo-300 hover:bg-indigo-500/30 disabled:opacity-50 disabled:cursor-not-allowed flex items-center justify-center gap-2"
                >
                  <Show when={isGenerating()} fallback={<Sparkles size={12} />}>
                    <Loader2 size={12} class="animate-spin" />
                  </Show>
                  {isGenerating() ? "Generating..." : "Generate Agent"}
                </button>
              </div>
              {/* Generated Agent Preview */}
              <Show when={generatedAgent()}>
                <div class="bg-zinc-800/50 rounded-lg p-3 border border-emerald-500/30 space-y-2">
                  <div class="flex items-center justify-between">
                    <span class="text-[10px] font-bold text-emerald-400">Generated Agent</span>
                    <button
                      onClick={() => setGeneratedAgent(null)}
                      class="text-zinc-500 hover:text-zinc-300"
                    >
                      <X size={12} />
                    </button>
                  </div>
                  <div class="text-[12px] font-bold text-zinc-100">{generatedAgent()?.name}</div>
                  <div class="text-[10px] text-zinc-400">{generatedAgent()?.description}</div>
                  <div class="text-[9px] text-zinc-400 max-h-60 overflow-y-auto whitespace-pre-wrap font-mono bg-black/20 p-2 rounded border border-white/5">
                    {generatedAgent()?.systemPrompt}
                  </div>
                  <button
                    onClick={handleSaveAgent}
                    disabled={isSaving()}
                    class="w-full flex items-center justify-center gap-2 py-2 bg-emerald-600 hover:bg-emerald-500 disabled:opacity-50 disabled:cursor-not-allowed text-white rounded-md text-[11px] font-bold transition-all shadow-lg active:scale-95"
                  >
                    <Show when={isSaving()} fallback={<Save size={14} />}>
                      <Loader2 size={14} class="animate-spin" />
                    </Show>
                    {isSaving() ? "Saving..." : "Save & Use Agent"}
                  </button>
                </div>
              </Show>
            </div>
          </Show>
          {/* Agent List */}
          <div class="px-3 py-1.5 flex items-center justify-between border-t border-white/5 bg-zinc-950/30">
            <span class="text-[9px] font-bold text-zinc-500 uppercase tracking-widest">Saved Agents</span>
            <button
              onClick={(e) => { e.stopPropagation(); loadAgents(); fetchAgents(props.instanceId); }}
              class="p-1 hover:bg-white/5 rounded text-zinc-500 hover:text-zinc-300 transition-colors"
              title="Refresh agents"
            >
              <RefreshCw size={10} />
            </button>
          </div>
          <div class="max-h-48 overflow-y-auto custom-scrollbar">
            <For each={agentList()}>
              {(agent) => (
                <button
                  onClick={() => handleSelect(agent.name)}
                  class={`w-full px-3 py-2 text-left hover:bg-white/5 transition-colors flex items-center gap-2 ${props.currentAgent === agent.name ? "bg-indigo-500/10 text-indigo-300" : "text-zinc-300"
                    }`}
                >
                  <Bot size={12} class="text-zinc-500" />
                  <div class="min-w-0">
                    <div class="text-[11px] font-bold truncate">{agent.name}</div>
                    {agent.description && (
                      <div class="text-[9px] text-zinc-500 truncate">{agent.description}</div>
                    )}
                  </div>
                </button>
              )}
            </For>
            <Show when={agentList().length === 0}>
              <div class="px-3 py-2 text-[10px] text-zinc-600">No agents available</div>
            </Show>
          </div>
        </div>
      </Show>
    </div>
  );
}

View File

@@ -0,0 +1,121 @@
/**
* LiteModelSelector - Non-reactive model selector for MultiX v2
*
* Uses polling instead of reactive subscriptions to prevent cascading updates.
*/
import { createSignal, For, onMount, onCleanup, Show } from "solid-js";
import { providers } from "@/stores/session-state";
import { ChevronDown, Cpu } from "lucide-solid";
/** A single selectable model, flattened with its owning provider id. */
interface Model {
  id: string;
  name: string;
  providerId: string;
}
/** A provider and the models it exposes (local snapshot of the store shape). */
interface Provider {
  id: string;
  name: string;
  models: Model[];
}
/** Props for {@link LiteModelSelector}. */
interface LiteModelSelectorProps {
  /** Workspace/instance whose providers are listed. */
  instanceId: string;
  /** Current chat session id (passed through; not read directly here). */
  sessionId: string;
  /** Currently selected provider/model pair. */
  currentModel: { providerId: string; modelId: string };
  /** Called when the user picks a model from the dropdown. */
  onModelChange: (model: { providerId: string; modelId: string }) => void;
}
export function LiteModelSelector(props: LiteModelSelectorProps) {
  const [isOpen, setIsOpen] = createSignal(false);
  const [providerList, setProviderList] = createSignal<Provider[]>([]);

  /**
   * Snapshot the global `providers` store for this instance into the local
   * `providerList` signal (polled, not subscribed, by design).
   */
  function loadProviders() {
    try {
      const instanceProviders = providers().get(props.instanceId) || [];
      setProviderList(instanceProviders.map((p: any) => ({
        id: p.id,
        name: p.name,
        models: (p.models || []).map((m: any) => ({
          id: m.id,
          name: m.name,
          providerId: p.id,
        })),
      })));
    } catch (e) {
      console.warn("Failed to load providers", e);
    }
  }

  onMount(() => {
    loadProviders();
    // Poll every 10 seconds (providers don't change often)
    const interval = setInterval(loadProviders, 10000);
    onCleanup(() => clearInterval(interval));
  });

  /** Propagate the chosen provider/model pair and close the dropdown. */
  const handleSelect = (providerId: string, modelId: string) => {
    props.onModelChange({ providerId, modelId });
    setIsOpen(false);
  };

  /**
   * Resolve the display name for the current selection.
   * Fix: prefer an exact provider+model match — two providers can expose the
   * same model id, and the old id-only scan returned whichever provider came
   * first. Falls back to an id-only match (previous behavior), then to the
   * raw model id.
   */
  const getCurrentModelName = () => {
    const { providerId, modelId } = props.currentModel;
    if (!modelId) return "Select Model";
    // Pass 1: exact provider + model match.
    for (const provider of providerList()) {
      if (provider.id !== providerId) continue;
      for (const model of provider.models) {
        if (model.id === modelId) return model.name;
      }
    }
    // Pass 2: match by model id alone (legacy behavior, covers stale providerId).
    for (const provider of providerList()) {
      for (const model of provider.models) {
        if (model.id === modelId) return model.name;
      }
    }
    return modelId;
  };

  return (
    <div class="relative">
      <button
        onClick={() => setIsOpen(!isOpen())}
        class="flex items-center justify-between w-full px-3 py-2 bg-zinc-900/60 border border-white/10 rounded-lg text-left hover:border-indigo-500/30 transition-all"
      >
        <div class="flex items-center gap-2">
          <Cpu size={14} class="text-emerald-400" />
          <span class="text-[11px] font-bold text-zinc-200 truncate">
            {getCurrentModelName()}
          </span>
        </div>
        <ChevronDown size={12} class={`text-zinc-500 transition-transform ${isOpen() ? "rotate-180" : ""}`} />
      </button>
      <Show when={isOpen()}>
        <div class="absolute top-full left-0 right-0 mt-1 bg-zinc-900 border border-white/10 rounded-lg shadow-xl z-50 max-h-64 overflow-y-auto">
          <For each={providerList()}>
            {(provider) => (
              <div>
                <div class="px-3 py-1.5 text-[9px] font-bold text-zinc-500 uppercase tracking-wide bg-zinc-950/50 sticky top-0">
                  {provider.name}
                </div>
                <For each={provider.models}>
                  {(model) => (
                    <button
                      onClick={() => handleSelect(provider.id, model.id)}
                      class={`w-full px-3 py-2 text-left hover:bg-white/5 transition-colors flex items-center gap-2 ${props.currentModel.modelId === model.id ? "bg-emerald-500/10 text-emerald-300" : "text-zinc-300"
                        }`}
                    >
                      <Cpu size={12} class="text-zinc-500" />
                      <span class="text-[11px] font-medium truncate">{model.name}</span>
                    </button>
                  )}
                </For>
              </div>
            )}
          </For>
          <Show when={providerList().length === 0}>
            <div class="px-3 py-2 text-[10px] text-zinc-600">No models available</div>
          </Show>
        </div>
      </Show>
    </div>
  );
}

View File

@@ -0,0 +1,230 @@
/**
* LiteSkillsSelector - Non-reactive skills selector for MultiX v2
*
* Uses polling instead of reactive subscriptions to prevent cascading updates.
* Displays selected skills as chips with ability to add/remove.
*/
import { createSignal, For, onMount, onCleanup, Show } from "solid-js";
import { catalog, catalogLoading, loadCatalog } from "@/stores/skills";
import { getSessionSkills, setSessionSkills } from "@/stores/session-state";
import { ChevronDown, Sparkles, X, Check, Loader2 } from "lucide-solid";
import type { SkillSelection } from "@/types/session";
// Props identifying which session's skill selection this component edits.
interface LiteSkillsSelectorProps {
  instanceId: string; // opencode instance the session belongs to
  sessionId: string; // session whose skill selection is displayed/edited
}
// Minimal catalog-entry shape used for rendering the dropdown list.
interface SkillInfo {
  id: string;
  name: string; // falls back to id when the catalog entry has no name
  description?: string;
}
/**
 * Non-reactive skills selector. Reads the skill catalog and the session's
 * selection via polling (every 2s) instead of reactive subscriptions, to
 * avoid cascading updates. Renders selected skills as removable chips plus
 * a filterable dropdown.
 *
 * Fix: the polling interval and its onCleanup were previously registered
 * AFTER an `await` inside `onMount(async …)`. In Solid, onCleanup called
 * after an await runs outside the component's reactive owner, so the
 * cleanup was never registered and the interval leaked on unmount. The
 * interval is now wired synchronously, and the async catalog load is
 * guarded with try/finally so the spinner can't get stuck.
 */
export function LiteSkillsSelector(props: LiteSkillsSelectorProps) {
  const [isOpen, setIsOpen] = createSignal(false);
  const [skillList, setSkillList] = createSignal<SkillInfo[]>([]);
  const [selectedSkills, setSelectedSkills] = createSignal<SkillSelection[]>([]);
  const [isLoading, setIsLoading] = createSignal(false);
  const [filterText, setFilterText] = createSignal("");

  // Snapshot the catalog into a local signal (non-reactive read).
  function loadSkills() {
    try {
      const skills = catalog();
      setSkillList(skills.map((s) => ({
        id: s.id,
        name: s.name || s.id,
        description: s.description
      })));
    } catch (e) {
      console.warn("Failed to load skills", e);
    }
  }

  // Snapshot the session's current selection into a local signal.
  function loadSelected() {
    try {
      const skills = getSessionSkills(props.instanceId, props.sessionId);
      setSelectedSkills(skills);
    } catch (e) {
      console.warn("Failed to load selected skills", e);
    }
  }

  onMount(() => {
    // Register polling + cleanup synchronously so onCleanup runs under the
    // component's reactive owner (registering it after an await would leak
    // the interval on unmount).
    const interval = setInterval(() => {
      loadSkills();
      loadSelected();
    }, 2000);
    onCleanup(() => clearInterval(interval));

    // Kick off the (possibly async) initial load separately.
    void (async () => {
      if (catalog().length === 0) {
        setIsLoading(true);
        try {
          await loadCatalog();
        } finally {
          // Always clear the spinner, even when loadCatalog rejects.
          setIsLoading(false);
        }
      }
      loadSkills();
      loadSelected();
    })().catch((e) => console.warn("Failed to load skill catalog", e));
  });

  // Add or remove a skill from the session's selection and persist it.
  const toggleSkill = (skill: SkillInfo) => {
    const current = selectedSkills();
    const isSelected = current.some(s => s.id === skill.id);
    let next: SkillSelection[];
    if (isSelected) {
      next = current.filter(s => s.id !== skill.id);
    } else {
      next = [...current, { id: skill.id, name: skill.name, description: skill.description }];
    }
    setSelectedSkills(next);
    setSessionSkills(props.instanceId, props.sessionId, next);
  };

  // Remove a single skill (chip "x" button) and persist.
  const removeSkill = (id: string) => {
    const next = selectedSkills().filter(s => s.id !== id);
    setSelectedSkills(next);
    setSessionSkills(props.instanceId, props.sessionId, next);
  };

  // Case-insensitive filter over name, id, and description.
  const filteredSkills = () => {
    const term = filterText().toLowerCase().trim();
    if (!term) return skillList();
    return skillList().filter(s =>
      s.name.toLowerCase().includes(term) ||
      s.id.toLowerCase().includes(term) ||
      (s.description?.toLowerCase().includes(term) ?? false)
    );
  };

  const isSkillSelected = (id: string) => selectedSkills().some(s => s.id === id);

  return (
    <div class="relative w-full">
      {/* Main Button */}
      <button
        onClick={() => setIsOpen(!isOpen())}
        class="flex items-center justify-between w-full px-3 py-2 bg-zinc-900/60 border border-white/10 rounded-lg text-left hover:border-purple-500/30 transition-all"
      >
        <div class="flex items-center gap-2 min-w-0 flex-1">
          <Sparkles size={14} class="text-purple-400 shrink-0" />
          <Show
            when={selectedSkills().length > 0}
            fallback={<span class="text-[11px] text-zinc-500">No skills</span>}
          >
            <div class="flex items-center gap-1 overflow-hidden">
              <span class="text-[11px] font-bold text-purple-300">
                {selectedSkills().length} skill{selectedSkills().length !== 1 ? 's' : ''}
              </span>
              {/* Show at most two chips inline; the rest collapse to "+N". */}
              <For each={selectedSkills().slice(0, 2)}>
                {(skill) => (
                  <span class="text-[10px] px-1.5 py-0.5 bg-purple-500/20 text-purple-300 rounded truncate max-w-[80px]">
                    {skill.name}
                  </span>
                )}
              </For>
              <Show when={selectedSkills().length > 2}>
                <span class="text-[10px] text-zinc-500">+{selectedSkills().length - 2}</span>
              </Show>
            </div>
          </Show>
        </div>
        <ChevronDown size={12} class={`text-zinc-500 transition-transform shrink-0 ${isOpen() ? "rotate-180" : ""}`} />
      </button>
      {/* Dropdown */}
      <Show when={isOpen()}>
        <div class="absolute top-full left-0 right-0 mt-1 bg-zinc-900 border border-white/10 rounded-lg shadow-xl z-50 max-h-80 overflow-hidden flex flex-col">
          {/* Selected Skills Chips */}
          <Show when={selectedSkills().length > 0}>
            <div class="px-3 py-2 border-b border-white/5 flex flex-wrap gap-1">
              <For each={selectedSkills()}>
                {(skill) => (
                  <span class="inline-flex items-center gap-1 px-2 py-0.5 bg-purple-500/20 text-purple-300 rounded-full text-[10px]">
                    {skill.name}
                    <button
                      onClick={(e) => {
                        e.stopPropagation();
                        removeSkill(skill.id);
                      }}
                      class="hover:text-red-400"
                    >
                      <X size={10} />
                    </button>
                  </span>
                )}
              </For>
            </div>
          </Show>
          {/* Filter Input */}
          <div class="px-3 py-2 border-b border-white/5">
            <input
              type="text"
              placeholder="Filter skills..."
              value={filterText()}
              onInput={(e) => setFilterText(e.currentTarget.value)}
              class="w-full bg-white/5 border border-white/10 rounded px-2 py-1 text-xs text-zinc-200 outline-none focus:border-purple-500/40"
            />
          </div>
          {/* Skills List */}
          <div class="overflow-y-auto flex-1 max-h-48">
            <Show
              when={!isLoading() && !catalogLoading()}
              fallback={
                <div class="px-3 py-4 text-center text-[11px] text-zinc-500 flex items-center justify-center gap-2">
                  <Loader2 size={12} class="animate-spin" />
                  Loading skills...
                </div>
              }
            >
              <Show
                when={filteredSkills().length > 0}
                fallback={
                  <div class="px-3 py-4 text-center text-[11px] text-zinc-500">
                    No skills found
                  </div>
                }
              >
                <For each={filteredSkills()}>
                  {(skill) => (
                    <button
                      onClick={() => toggleSkill(skill)}
                      class={`w-full px-3 py-2 text-left hover:bg-white/5 transition-colors flex items-center gap-2 ${isSkillSelected(skill.id) ? "bg-purple-500/10" : ""
                        }`}
                    >
                      <div class={`w-4 h-4 rounded border flex items-center justify-center shrink-0 ${isSkillSelected(skill.id)
                        ? "bg-purple-500 border-purple-500"
                        : "border-white/20"
                        }`}>
                        <Show when={isSkillSelected(skill.id)}>
                          <Check size={10} class="text-white" />
                        </Show>
                      </div>
                      <div class="flex-1 min-w-0">
                        <div class={`text-[11px] font-medium truncate ${isSkillSelected(skill.id) ? "text-purple-300" : "text-zinc-300"
                          }`}>
                          {skill.name}
                        </div>
                        <Show when={skill.description}>
                          <div class="text-[10px] text-zinc-500 truncate">
                            {skill.description}
                          </div>
                        </Show>
                      </div>
                    </button>
                  )}
                </For>
              </Show>
            </Show>
          </div>
        </div>
      </Show>
    </div>
  );
}

View File

@@ -0,0 +1,87 @@
/**
* MessageNavSidebar - Quick navigation for messages
*
* Shows YOU/ASST labels with hover preview.
*/
import { For, Show, createSignal, type Accessor } from "solid-js";
import type { InstanceMessageStore } from "@/stores/message-v2/instance-store";
// Props for MessageNavSidebar.
interface MessageNavSidebarProps {
  messageIds: Accessor<string[]>; // ordered ids of messages in the active thread
  store: () => InstanceMessageStore; // accessor for the per-instance message store
  scrollContainer: HTMLDivElement | undefined; // NOTE(review): not referenced inside the component — confirm whether it is still needed
  onTabClick: (messageId: string) => void; // invoked with the clicked message's id
}
/**
 * Vertical rail of YOU/ASST buttons, one per message, for jumping to a
 * message via props.onTabClick. Hovering a button shows a tooltip with a
 * truncated preview of the message's first text part.
 */
export function MessageNavSidebar(props: MessageNavSidebarProps) {
  return (
    <div class="w-14 shrink-0 bg-zinc-900/40 border-l border-white/5 overflow-hidden py-2 px-1.5 flex flex-col items-center gap-1">
      <For each={props.messageIds()}>
        {(messageId, index) => {
          // Per-row hover state; a signal created inside For's map callback
          // is scoped to that row and disposed with it.
          const [showPreview, setShowPreview] = createSignal(false);
          const msg = () => props.store().getMessage(messageId);
          const isUser = () => msg()?.role === "user";
          // Get message preview text (first 150 chars)
          const previewText = () => {
            const message = msg();
            if (!message) return "";
            // Try to get text from parts
            const parts = message.parts || {};
            let text = "";
            for (const partId of Object.keys(parts)) {
              const partRecord = parts[partId];
              if (partRecord?.data?.type === "text") {
                // First text part wins; other part types are ignored.
                text = (partRecord.data as any).text || "";
                break;
              }
            }
            // Fallback to direct content
            if (!text && (message as any).content) {
              text = (message as any).content;
            }
            return text.length > 150 ? text.substring(0, 150) + "..." : text;
          };
          return (
            <div class="relative group">
              <button
                onClick={() => props.onTabClick(messageId)}
                onMouseEnter={() => setShowPreview(true)}
                onMouseLeave={() => setShowPreview(false)}
                class={`w-10 py-1.5 rounded text-[8px] font-black uppercase transition-all cursor-pointer ${isUser()
                  ? "bg-indigo-500/20 border border-indigo-500/40 text-indigo-400 hover:bg-indigo-500/40 hover:scale-105"
                  : "bg-emerald-500/20 border border-emerald-500/40 text-emerald-400 hover:bg-emerald-500/40 hover:scale-105"
                  }`}
              >
                {isUser() ? "YOU" : "ASST"}
              </button>
              {/* Hover Preview Tooltip */}
              <Show when={showPreview()}>
                <div class="absolute right-full mr-2 top-0 w-72 max-h-40 overflow-y-auto bg-zinc-900 border border-white/10 rounded-lg shadow-xl p-3 z-50 animate-in fade-in slide-in-from-right-2 duration-150 custom-scrollbar">
                  <div class="flex items-center justify-between mb-2">
                    <div class={`text-[9px] font-bold uppercase ${isUser() ? "text-indigo-400" : "text-emerald-400"}`}>
                      {isUser() ? "You" : "Assistant"} Msg {index() + 1}
                    </div>
                    <div class="text-[8px] text-zinc-600">
                      {msg()?.status === "streaming" ? "• Streaming" : ""}
                    </div>
                  </div>
                  <p class="text-[10px] text-zinc-300 leading-relaxed whitespace-pre-wrap">
                    {previewText()}
                  </p>
                </div>
              </Show>
            </div>
          );
        }}
      </For>
    </div>
  );
}

View File

@@ -0,0 +1,89 @@
/**
* PipelineView - Task Dashboard
*
* Shows all active tasks as cards when no task is selected.
*/
import { For, Show, type Accessor } from "solid-js";
import { Plus, ChevronRight, X } from "lucide-solid";
import type { Task } from "@/types/session";
// Props for PipelineView.
interface PipelineViewProps {
  visibleTasks: Accessor<Task[]>; // non-archived tasks to render as cards
  onTaskClick: (taskId: string) => void; // select/open a task
  onArchiveTask: (taskId: string) => void; // archive (hide) a task
}
/**
 * Dashboard of active task cards, shown when no task is selected.
 * Each card opens the task on click; the X span archives it (it is a
 * span with role="button" rather than a <button> to avoid nesting
 * interactive buttons).
 */
export function PipelineView(props: PipelineViewProps) {
  return (
    <div class="p-4 space-y-8 animate-in fade-in slide-in-from-bottom-4 duration-500">
      <div class="space-y-2">
        <h2 class="text-2xl font-black text-white tracking-tight leading-none">Pipeline</h2>
        <p class="text-xs font-medium text-zinc-500 uppercase tracking-[0.2em]">Agentic Orchestration</p>
      </div>
      <div class="space-y-4">
        <div class="flex items-center justify-between">
          <span class="text-[10px] font-bold text-zinc-600 uppercase tracking-widest">Active Threads</span>
          <div class="h-px flex-1 bg-white/5 mx-4" />
          <span class="text-[10px] font-black text-indigo-400 bg-indigo-500/10 px-2 py-0.5 rounded border border-indigo-500/20">
            {props.visibleTasks().length}
          </span>
        </div>
        <div class="grid gap-3">
          {/* Empty state: invite the user to start a thread. */}
          <Show when={props.visibleTasks().length === 0}>
            <div class="group relative p-8 rounded-3xl border border-dashed border-white/5 bg-zinc-900/20 flex flex-col items-center justify-center text-center space-y-4 transition-all hover:bg-zinc-900/40 hover:border-white/10">
              <div class="w-12 h-12 rounded-2xl bg-white/5 flex items-center justify-center text-zinc-600 group-hover:text-indigo-400 group-hover:scale-110 transition-all duration-500">
                <Plus size={24} strokeWidth={1.5} />
              </div>
              <div class="space-y-1">
                <p class="text-sm font-bold text-zinc-400">No active tasks</p>
                <p class="text-[11px] text-zinc-600">Send a message below to start a new thread</p>
              </div>
            </div>
          </Show>
          <For each={props.visibleTasks()}>
            {(task) => (
              <button
                onClick={() => props.onTaskClick(task.id)}
                class={`group relative p-4 rounded-2xl border border-white/5 bg-zinc-900/40 hover:bg-zinc-800/60 hover:border-indigo-500/30 transition-all duration-300 text-left flex items-start space-x-4 active:scale-[0.98] ${task.title.toLowerCase().includes("smart fix") ? "smart-fix-highlight" : ""}`}
              >
                {/* Status dot: green=completed, indigo pulsing=in-progress, grey=other. */}
                <div class={`mt-1 w-2 h-2 rounded-full shadow-[0_0_10px_rgba(var(--color),0.5)] ${task.status === "completed" ? "bg-emerald-500 shadow-emerald-500/40" :
                  task.status === "in-progress" ? "bg-indigo-500 shadow-indigo-500/40 animate-pulse" :
                    "bg-zinc-600 shadow-zinc-600/20"
                  }`} />
                <div class="flex-1 min-w-0 space-y-1">
                  <p class="text-sm font-bold text-zinc-100 truncate group-hover:text-white transition-colors">
                    {task.title}
                  </p>
                  <div class="flex items-center space-x-3 text-[10px] font-bold text-zinc-500 uppercase tracking-tight">
                    <span>{new Date(task.timestamp).toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })}</span>
                    <span class="w-1 h-1 rounded-full bg-zinc-800" />
                    <span>{task.messageIds?.length || 0} messages</span>
                  </div>
                </div>
                <div class="flex items-center space-x-2">
                  {/* stopPropagation keeps the archive click from also opening the task. */}
                  <span
                    role="button"
                    tabindex={0}
                    onClick={(event) => {
                      event.stopPropagation();
                      props.onArchiveTask(task.id);
                    }}
                    class="text-zinc-600 hover:text-zinc-200 transition-colors"
                    title="Archive task"
                  >
                    <X size={14} />
                  </span>
                  <ChevronRight size={16} class="text-zinc-700 group-hover:text-indigo-400 group-hover:translate-x-1 transition-all" />
                </div>
              </button>
            )}
          </For>
        </div>
      </div>
    </div>
  );
}

View File

@@ -0,0 +1,155 @@
/**
* PromptEnhancer - Clavix-inspired prompt optimization
*
* Source: https://github.com/ClavixDev/Clavix.git
*
* Takes a user's raw input and refines it into a precise,
* context-aware, actionable prompt using the session's configured model.
*/
import { getLogger } from "@/lib/logger";
import { sessions } from "@/stores/session-state";
const log = getLogger("prompt-enhancer");
// The meta-prompt based on Clavix CLEAR framework
const ENHANCEMENT_PROMPT = `You are an ELITE Software Architect and Prompt Engineer, powered by the "ThoughtBox" reasoning engine.
YOUR MISSION:
Transform the user's raw input into a "God-Tier" System Prompt—a comprehensive, execution-ready technical specification that a senior engineer could implement without further questions.
TARGET OUTPUT:
- Detailed, file-level architectural blueprint
- Explicit coding standards (TypeScript/solid-js/tailwindcss context implied)
- Comprehensive error handling and edge case strategy
- Step-by-step implementation plan
METHODOLOGY (ThoughtBox):
1. **Decode Intent**: What is the root problem? What is the *value*?
2. **Context Inference**: Assume a high-performance TypeScript/React/Electron environment. Infer necessary imports, stores, and services.
3. **Architectural Strategy**: Define the component hierarchy, state management (signals/stores), and side effects.
4. **Specification Generation**: Write the actual prompt.
OUTPUT FORMAT:
Return ONLY the enhanced prompt string, formatted as follows:
# 🎯 OBJECTIVE
[Concise, high-level goal]
# 🏗️ ARCHITECTURE & DESIGN
- **Files**: List exact file paths to touch/create.
- **Components**: Define props, state, and interfaces.
- **Data Flow**: Explain signal/store interactions.
# 🛡️ RESTRICTIONS & STANDARDS
- **Tech Stack**: TypeScript, SolidJS, TailwindCSS, Lucide Icons.
- **Rules**: NO placeholders, NO "todo", Strict Types, Accessibility-first.
- **Context**: [Infer from input, e.g., "Use ContextEngine for retrieval"]
# 📝 IMPLEMENTATION PLAN
1. [Step 1: Description]
2. [Step 2: Description]
...
# 💡 ORIGINAL REQUEST
"""
{INPUT}
"""
`;
/**
* Get the model configured for a session
*/
function getSessionModel(instanceId: string, sessionId: string): string {
try {
const instanceSessions = sessions().get(instanceId);
const session = instanceSessions?.get(sessionId);
if (session?.model?.modelId) {
return session.model.modelId;
}
} catch (e) {
log.warn("Could not get session model", e);
}
return "minimax-m1"; // Fallback
}
/**
 * Enhance a user's prompt using the session's AI model.
 *
 * Wraps the raw input in the ENHANCEMENT_PROMPT meta-prompt and sends it to
 * the local Ollama chat endpoint. On any failure (HTTP error, empty reply,
 * thrown exception) the ORIGINAL input is returned unchanged, so this is
 * always safe to call.
 */
export async function enhancePrompt(
  userInput: string,
  instanceId: string,
  sessionId?: string
): Promise<string> {
  if (!userInput.trim()) {
    return userInput;
  }
  // Resolve the model from the session config; default when no session given.
  const model = sessionId ? getSessionModel(instanceId, sessionId) : "minimax-m1";
  log.info("Enhancing prompt...", { length: userInput.length, model });
  try {
    const payload = {
      model,
      messages: [
        {
          role: "user",
          content: ENHANCEMENT_PROMPT.replace("{INPUT}", userInput)
        }
      ],
      stream: false
    };
    const res = await fetch("/api/ollama/chat", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(payload)
    });
    if (!res.ok) {
      log.warn("Enhancement API failed, returning original", { status: res.status });
      return userInput;
    }
    const body = await res.json();
    // Accept both Ollama-native and OpenAI-compatible response shapes.
    const enhanced = body?.message?.content || body?.choices?.[0]?.message?.content;
    if (!enhanced || enhanced.trim().length === 0) {
      log.warn("Enhancement returned empty, using original");
      return userInput;
    }
    log.info("Prompt enhanced successfully", {
      originalLength: userInput.length,
      enhancedLength: enhanced.length,
      model
    });
    return enhanced.trim();
  } catch (error) {
    log.error("Prompt enhancement failed", error);
    return userInput;
  }
}
/**
 * Get quick, synchronous hints for improving a prompt before sending it.
 *
 * Fix: the keyword check previously used case-sensitive `includes`, while
 * the action-verb check used a case-insensitive regex — so "Mention specific
 * files…" was wrongly suggested for inputs like "Fix the File handling".
 * Both checks are now case-insensitive.
 *
 * @param userInput raw prompt text
 * @returns zero or more human-readable tips (empty array = prompt looks fine)
 */
export function getQuickTips(userInput: string): string[] {
  const tips: string[] = [];
  const lowered = userInput.toLowerCase();
  if (userInput.length < 20) {
    tips.push("Add more context for better results");
  }
  if (!lowered.includes("file") && !lowered.includes("function") && !lowered.includes("component")) {
    tips.push("Mention specific files or functions if applicable");
  }
  if (!/\b(create|fix|update|add|remove|refactor)\b/i.test(userInput)) {
    tips.push("Start with an action verb: create, fix, update, etc.");
  }
  return tips;
}

View File

@@ -0,0 +1,883 @@
/**
* MultiX v2 - Main Entry Point
*
* A complete rebuild of the MultiTaskChat component with:
* 1. Local signals + polling (no reactive cascade = no freeze)
* 2. 100% feature parity with original
* 3. New features: Context-Engine, Compaction, Prompt Enhancement
*/
import { createSignal, Show, onMount, For, onCleanup, batch } from "solid-js";
import toast from "solid-toast";
import { sessions, activeSessionId, setActiveSession } from "@/stores/session-state";
import { loadMessages, fetchSessions, flushSessionPersistence } from "@/stores/sessions";
import { sendMessage, compactSession, updateSessionAgent, updateSessionModelForSession, forceReset, abortSession } from "@/stores/session-actions";
import { addTask, setActiveTask, archiveTask } from "@/stores/task-actions";
import { messageStoreBus } from "@/stores/message-v2/bus";
import { formatTokenTotal } from "@/lib/formatters";
import { addToTaskQueue, getSoloState, setActiveTaskId, toggleAutonomous, toggleAutoApproval, toggleApex } from "@/stores/solo-store";
import { getLogger } from "@/lib/logger";
import { clearCompactionSuggestion, getCompactionSuggestion } from "@/stores/session-compaction";
import { emitSessionSidebarRequest } from "@/lib/session-sidebar-events";
import {
Command, Plus, PanelRight, ListTodo, AtSign, Hash, Mic, ArrowUp,
ChevronRight, Loader2, X, Zap, Layers, Sparkles, StopCircle, Key,
FileArchive, Paperclip, Wand2, Shield,
} from "lucide-solid";
// Using Lite versions to avoid reactive cascade
// import ModelSelector from "@/components/model-selector";
// import AgentSelector from "@/components/agent-selector";
import { DebugOverlay, setForceResetFn } from "@/components/debug-overlay";
import AttachmentChip from "@/components/attachment-chip";
import { createFileAttachment } from "@/types/attachment";
import type { InstanceMessageStore } from "@/stores/message-v2/instance-store";
import type { Task, Session } from "@/types/session";
// Sub-components
import { SimpleMessageBlock } from "./core/SimpleMessageBlock";
import { PipelineView } from "./features/PipelineView";
import { MessageNavSidebar } from "./features/MessageNavSidebar";
import { enhancePrompt } from "./features/PromptEnhancer";
import { LiteAgentSelector } from "./features/LiteAgentSelector";
import { LiteModelSelector } from "./features/LiteModelSelector";
import { LiteSkillsSelector } from "./features/LiteSkillsSelector";
import MessageBlockList from "@/components/message-block-list";
const OPEN_ADVANCED_SETTINGS_EVENT = "open-advanced-settings";
const log = getLogger("multix-v2");
interface MultiXV2Props {
instanceId: string;
sessionId: string;
}
export default function MultiXV2(props: MultiXV2Props) {
// ============================================================================
// LOCAL STATE (No reactive memos on stores - polling instead)
// ============================================================================
// Per-task sending state (Map of taskId -> boolean)
const [sendingTasks, setSendingTasks] = createSignal<Set<string>>(new Set());
const [chatInput, setChatInput] = createSignal("");
const [isCompacting, setIsCompacting] = createSignal(false);
const [attachments, setAttachments] = createSignal<ReturnType<typeof createFileAttachment>[]>([]);
const [userScrolling, setUserScrolling] = createSignal(false);
const [isEnhancing, setIsEnhancing] = createSignal(false);
// Cached store values - updated via polling
const [tasks, setTasks] = createSignal<Task[]>([]);
const [visibleTasks, setVisibleTasks] = createSignal<Task[]>([]);
const [selectedTaskId, setSelectedTaskIdLocal] = createSignal<string | null>(null);
const [messageIds, setMessageIds] = createSignal<string[]>([]);
const [cachedModelId, setCachedModelId] = createSignal("unknown");
const [cachedProviderId, setCachedProviderId] = createSignal("");
const [cachedAgent, setCachedAgent] = createSignal("");
const [cachedTokensUsed, setCachedTokensUsed] = createSignal(0);
const [cachedCost, setCachedCost] = createSignal(0);
const [isAgentThinking, setIsAgentThinking] = createSignal(false);
const [compactionSuggestion, setCompactionSuggestion] = createSignal<{ reason: string } | null>(null);
const [soloState, setSoloState] = createSignal({ isApex: false, isAutonomous: false, autoApproval: false, activeTaskId: null as string | null });
const [lastAssistantIndex, setLastAssistantIndex] = createSignal(-1);
const [bottomSentinel, setBottomSentinel] = createSignal<HTMLDivElement | null>(null);
const [hasUserSelection, setHasUserSelection] = createSignal(false);
const forcedLoadTimestamps = new Map<string, number>();
// True when the currently selected task has a message in flight.
// With no task selected, reports whether ANY task is sending.
const isSending = () => {
  const taskId = selectedTaskId();
  return taskId ? sendingTasks().has(taskId) : sendingTasks().size > 0;
};
// Mark/unmark a task as having an in-flight send. Copies the Set so the
// signal observes a fresh reference and notifies consumers.
const setTaskSending = (taskId: string, sending: boolean) => {
  setSendingTasks(prev => {
    const updated = new Set(prev);
    if (sending) updated.add(taskId);
    else updated.delete(taskId);
    return updated;
  });
};
let scrollContainer: HTMLDivElement | undefined;
let fileInputRef: HTMLInputElement | undefined;
// ============================================================================
// STORE ACCESS HELPERS (Non-reactive reads)
// ============================================================================
// This component's own (parent) session, if known to the store.
function getSession(): Session | undefined {
  return sessions().get(props.instanceId)?.get(props.sessionId);
}
// Per-instance message store (created on first use).
function getMessageStore(): InstanceMessageStore {
  return messageStoreBus.getOrCreate(props.instanceId);
}
// The currently selected, non-archived task, if any.
function getSelectedTask(): Task | undefined {
  const id = selectedTaskId();
  return visibleTasks().find(t => t.id === id);
}
// Session id to read/send messages from: the selected task's dedicated
// session when present, otherwise the parent session.
function getActiveTaskSessionId(): string {
  return getSelectedTask()?.taskSessionId || props.sessionId;
}
// Session object behind getActiveTaskSessionId().
function getActiveTaskSession(): Session | undefined {
  return sessions().get(props.instanceId)?.get(getActiveTaskSessionId());
}
// ============================================================================
// POLLING-BASED SYNC (Updates local state from stores every 150ms)
// ============================================================================
// Pull a fresh snapshot of all store state into the component's local
// signals. Called every 150ms by the polling interval (and on demand after
// actions) instead of subscribing reactively, to avoid cascading updates.
// Statement order matters: tasks → messageIds → derived flags.
function syncFromStore() {
  try {
    const session = getSession();
    if (session) {
      const allTasks = session.tasks || [];
      setTasks(allTasks);
      setVisibleTasks(allTasks.filter(t => !t.archived));
      // NOTE: Don't overwrite selectedTaskId from store - local state is authoritative
      // This prevents the reactive cascade when the store updates
      if (!selectedTaskId() && !hasUserSelection() && allTasks.length > 0) {
        const preferredId = session.activeTaskId || allTasks[0].id;
        setSelectedTaskIdLocal(preferredId);
      }
    }
    // Get message IDs for currently selected task
    const currentTaskId = selectedTaskId();
    if (currentTaskId) {
      const task = visibleTasks().find(t => t.id === currentTaskId);
      if (task) {
        const store = getMessageStore();
        if (task.taskSessionId) {
          const cachedIds = store.getSessionMessageIds(task.taskSessionId);
          if (cachedIds.length === 0) {
            // Cache miss: force a load, but rate-limit forced loads to one
            // per second per session so the 150ms poll can't stampede.
            const lastForced = forcedLoadTimestamps.get(task.taskSessionId) ?? 0;
            if (Date.now() - lastForced > 1000) {
              forcedLoadTimestamps.set(task.taskSessionId, Date.now());
              loadMessages(props.instanceId, task.taskSessionId, true).catch((error) =>
                log.error("Failed to load task session messages", error)
              );
            }
          } else {
            // Cache hit: still kick a (non-forced) refresh in the background.
            loadMessages(props.instanceId, task.taskSessionId).catch((error) =>
              log.error("Failed to load task session messages", error)
            );
          }
          setMessageIds(store.getSessionMessageIds(task.taskSessionId));
        } else {
          // Task without its own session: fall back to ids recorded on it.
          setMessageIds(task.messageIds || []);
        }
      } else {
        setMessageIds([]);
      }
    } else {
      setMessageIds([]);
    }
    // Cache model/provider/agent from the active task's session so the UI
    // labels don't need reactive store reads.
    const taskSession = getActiveTaskSession();
    if (taskSession?.model?.modelId) {
      setCachedModelId(taskSession.model.modelId);
    }
    if (taskSession?.model?.providerId) {
      setCachedProviderId(taskSession.model.providerId);
    }
    if (taskSession?.agent) {
      setCachedAgent(taskSession.agent);
    }
    const store = getMessageStore();
    const usage = store.getSessionUsage(props.sessionId);
    if (usage) {
      setCachedTokensUsed(usage.actualUsageTokens ?? 0);
      setCachedCost(usage.totalCost ?? 0);
    }
    const ids = messageIds();
    if (ids.length > 0) {
      // "Thinking" = the most recent message is an assistant message that is
      // still streaming/sending.
      const lastMsg = store.getMessage(ids[ids.length - 1]);
      setIsAgentThinking(
        lastMsg?.role === "assistant" &&
        (lastMsg.status === "streaming" || lastMsg.status === "sending")
      );
      // Calculate lastAssistantIndex
      let lastIdx = -1;
      for (let i = ids.length - 1; i >= 0; i--) {
        const msg = store.getMessage(ids[i]);
        if (msg?.role === "assistant") {
          lastIdx = i;
          break;
        }
      }
      setLastAssistantIndex(lastIdx);
    } else {
      setIsAgentThinking(false);
      setLastAssistantIndex(-1);
    }
    const suggestion = getCompactionSuggestion(props.instanceId, getActiveTaskSessionId());
    setCompactionSuggestion(suggestion);
    setSoloState(getSoloState(props.instanceId));
  } catch (e) {
    log.error("syncFromStore error", e);
  }
}
// ============================================================================
// LIFECYCLE
// ============================================================================
// Component lifecycle: wire debug reset, do the initial load, start the
// 150ms store-polling interval, and track manual scrolling so autoscroll
// doesn't fight the user.
onMount(() => {
  setForceResetFn(() => {
    forceReset();
    // Clear all sending states on force reset
    setSendingTasks(new Set<string>());
  });
  // Initialize
  loadMessages(props.instanceId, props.sessionId);
  fetchSessions(props.instanceId);
  syncFromStore();
  const interval = setInterval(syncFromStore, 150);
  // Treat "within 50px of the bottom" as at-bottom; anything else means the
  // user scrolled up and autoscroll should pause.
  const handleScroll = () => {
    if (!scrollContainer) return;
    const isAtBottom = scrollContainer.scrollHeight - scrollContainer.scrollTop - scrollContainer.clientHeight < 50;
    setUserScrolling(!isAtBottom);
  };
  scrollContainer?.addEventListener('scroll', handleScroll, { passive: true });
  onCleanup(() => {
    clearInterval(interval);
    scrollContainer?.removeEventListener('scroll', handleScroll);
    // Ensure any pending task updates are saved immediately before we potentially reload them
    flushSessionPersistence(props.instanceId);
  });
});
// ============================================================================
// ACTIONS
// ============================================================================
// Snap the message list to the bottom, unless the user has scrolled away.
const scrollToBottom = () => {
  if (!scrollContainer || userScrolling()) return;
  scrollContainer.scrollTop = scrollContainer.scrollHeight;
};
// Select a task: local signal updates immediately for a snappy UI, while the
// global store write is deferred off the hot path (idle callback when
// available) so it can't block the click handler.
const setSelectedTaskId = (id: string | null) => {
  // Update local state immediately (fast)
  setSelectedTaskIdLocal(id);
  setHasUserSelection(true);
  // Immediately sync to load the new task's agent/model
  syncFromStore();
  // Defer the global store update using idle callback (non-blocking)
  if (typeof requestIdleCallback !== 'undefined') {
    requestIdleCallback(() => {
      setActiveTask(props.instanceId, props.sessionId, id || undefined);
    }, { timeout: 500 });
  } else {
    // Fallback: use setTimeout with longer delay
    setTimeout(() => {
      setActiveTask(props.instanceId, props.sessionId, id || undefined);
    }, 50);
  }
};
// Send the composer's content to the agent. If no task is selected a new
// task (and dedicated session) is created first. Sending state is tracked
// per-task so parallel tasks don't block one another.
const handleSendMessage = async () => {
  const message = chatInput().trim();
  if (!message) return;
  // Check if THIS specific task is already sending
  const currentTaskId = selectedTaskId();
  if (currentTaskId && sendingTasks().has(currentTaskId)) return;
  const currentMessage = message;
  const currentAttachments = attachments();
  // Clear the composer optimistically in one reactive batch.
  batch(() => {
    setChatInput("");
    setAttachments([]);
  });
  // Track which task we're sending for (might be created below)
  let taskIdForSending: string | null = null;
  try {
    let taskId = currentTaskId;
    let targetSessionId = props.sessionId;
    if (!taskId) {
      // Create new task; title is the message truncated to 30 chars.
      const title = currentMessage.length > 30 ? currentMessage.substring(0, 27) + "..." : currentMessage;
      log.info("[MultiX] Creating task...", { title });
      const result = await addTask(props.instanceId, props.sessionId, title);
      taskId = result.id;
      targetSessionId = result.taskSessionId || props.sessionId;
      log.info("[MultiX] Task created", { taskId, targetSessionId, hasTaskSession: !!result.taskSessionId });
      // Immediately sync to get the new task in our local state
      syncFromStore();
      // Set the selected task
      setSelectedTaskId(taskId);
      // In autonomous mode, make this the active task or queue it behind one.
      const s = soloState();
      if (s.isAutonomous) {
        if (!s.activeTaskId) {
          setActiveTaskId(props.instanceId, taskId);
        } else {
          addToTaskQueue(props.instanceId, taskId);
        }
      }
    } else {
      // Existing task - get up-to-date task info
      syncFromStore();
      const task = visibleTasks().find(t => t.id === taskId);
      targetSessionId = task?.taskSessionId || props.sessionId;
      log.info("[MultiX] Existing task", { taskId, targetSessionId });
    }
    // Mark THIS task as sending
    taskIdForSending = taskId;
    setTaskSending(taskId, true);
    log.info("[MultiX] Sending message", { instanceId: props.instanceId, targetSessionId, messageLength: currentMessage.length, taskId });
    // Send the message (this is async and will stream)
    await sendMessage(props.instanceId, targetSessionId, currentMessage, currentAttachments, taskId || undefined);
    log.info("[MultiX] Message sent successfully");
    // Force sync after message is sent to pick up the new messages
    // (staggered because streamed content continues arriving over time).
    setTimeout(() => syncFromStore(), 100);
    setTimeout(() => syncFromStore(), 500);
    setTimeout(() => syncFromStore(), 1000);
    setTimeout(scrollToBottom, 150);
  } catch (error) {
    log.error("Send failed:", error);
    console.error("[MultiX] Send failed:", error);
  } finally {
    // Clear sending state for this specific task
    if (taskIdForSending) {
      setTaskSending(taskIdForSending, false);
    }
  }
};
// Create a new, empty task named "Task N" and select it.
const handleCreateTask = () => {
  // Allow creating new tasks even when other tasks are processing
  const nextIndex = tasks().length + 1;
  // NOTE(review): the template literal leaves a trailing space in the title
  // ("Task N ") — looks unintentional; confirm before changing.
  const title = `Task ${nextIndex} `;
  // setTimeout(…, 0) defers the store write off the click handler's stack.
  setTimeout(async () => {
    try {
      const result = await addTask(props.instanceId, props.sessionId, title);
      setSelectedTaskId(result.id);
      setTimeout(() => syncFromStore(), 50);
    } catch (error) {
      log.error("handleCreateTask failed", error);
    }
  }, 0);
};
// Composer key handling: Enter sends, Shift+Enter inserts a newline.
const handleKeyDown = (e: KeyboardEvent) => {
  if (e.key !== "Enter" || e.shiftKey) return;
  e.preventDefault();
  handleSendMessage();
};
// Abort the in-flight agent run for the selected task's session.
// Shift+click instead performs a hard force-reset of all state.
const handleStopAgent = async (e?: MouseEvent) => {
  if (e?.shiftKey) {
    forceReset();
    // Clear all sending states on force reset
    setSendingTasks(new Set<string>());
    return;
  }
  const task = getSelectedTask();
  // If no task selected, we might be in global pipeline, use sessionId
  const targetSessionId = task?.taskSessionId || props.sessionId;
  const taskId = task?.id || selectedTaskId();
  try {
    await abortSession(props.instanceId, targetSessionId);
    // Manually force UI update
    if (taskId) {
      setTaskSending(taskId, false);
    }
    setIsAgentThinking(false);
    setTimeout(() => syncFromStore(), 50);
  } catch (error) {
    log.error("Failed to stop agent", error);
  }
};
// Compact (summarize) the active task session's history to reduce token
// usage. No-ops if a compaction is already running or the session is tiny.
const handleCompact = async () => {
  const targetSessionId = getActiveTaskSessionId();
  if (isCompacting()) return;
  // Get message count to verify we have messages to compact
  const store = getMessageStore();
  const msgIds = store.getSessionMessageIds(targetSessionId);
  log.info("[MultiX] Starting compaction", {
    instanceId: props.instanceId,
    sessionId: targetSessionId,
    messageCount: msgIds.length
  });
  if (msgIds.length < 3) {
    log.info("[MultiX] Session too small to compact", { count: msgIds.length });
    toast.success("Session is already concise. No compaction needed.", {
      icon: <Zap size={14} class="text-amber-400" />
    });
    return;
  }
  setIsCompacting(true);
  const toastId = toast.loading("Compacting session history...");
  try {
    // Drop any pending "please compact" suggestion before we actually do it.
    clearCompactionSuggestion(props.instanceId, targetSessionId);
    const result = await compactSession(props.instanceId, targetSessionId);
    // CRITICAL: Restore the parent session as active to prevent navigation away from MultiX
    const currentActive = activeSessionId().get(props.instanceId);
    if (currentActive !== props.sessionId) {
      setActiveSession(props.instanceId, props.sessionId);
    }
    log.info("[MultiX] Compaction complete", {
      success: result.success,
      tokenBefore: result.token_before,
      tokenAfter: result.token_after,
      reduction: result.token_reduction_pct
    });
    toast.success(`Compacted! Reduced by ${result.token_reduction_pct}% (${result.token_after} tokens)`, {
      id: toastId,
      duration: 4000
    });
    // Sync to update UI after compaction
    syncFromStore();
  } catch (error) {
    log.error("Failed to compact session", error);
    toast.error("Compaction failed. Please try again.", { id: toastId });
  } finally {
    setIsCompacting(false);
  }
};
// Ask whichever shell owns the advanced-settings panel to open it for this
// instance/session, via a window-level custom event.
const handleOpenAdvancedSettings = () => {
  const detail = { instanceId: props.instanceId, sessionId: props.sessionId };
  window.dispatchEvent(new CustomEvent(OPEN_ADVANCED_SETTINGS_EVENT, { detail }));
};
/**
 * Rewrite the current draft through the prompt enhancer. No-ops on an empty
 * draft or while a previous enhancement is still in flight.
 */
const handleEnhancePrompt = async () => {
  const draft = chatInput().trim();
  if (!draft || isEnhancing()) return;
  setIsEnhancing(true);
  try {
    // The task session is passed along so the enhancer runs on the model
    // configured for that task.
    const taskSessionId = getActiveTaskSessionId();
    const enhanced = await enhancePrompt(draft, props.instanceId, taskSessionId);
    setChatInput(enhanced);
  } catch (error) {
    log.error("Prompt enhancement failed", error);
  } finally {
    setIsEnhancing(false);
  }
};
/**
 * Toggle "Apex Pro", the combined state where BOTH autonomous mode and
 * auto-approval are on. When both are on, turn both off; otherwise turn on
 * whichever of the two is still off.
 */
const toggleApexPro = () => {
  const s = soloState();
  if (s.isAutonomous && s.autoApproval) {
    // Both flags known true here, so toggle each unconditionally.
    toggleAutonomous(props.instanceId);
    toggleAutoApproval(props.instanceId);
  } else {
    if (!s.isAutonomous) toggleAutonomous(props.instanceId);
    if (!s.autoApproval) toggleAutoApproval(props.instanceId);
  }
};
// True only when both autonomous mode and auto-approval are enabled.
const isApexPro = () => {
  const { isAutonomous, autoApproval } = soloState();
  return isAutonomous && autoApproval;
};
// Archive (remove) a task tab from this instance's pipeline session.
const handleArchiveTask = (taskId: string) => {
  archiveTask(props.instanceId, props.sessionId, taskId);
};
// Append an attachment immutably so Solid sees a fresh array reference.
const addAttachment = (attachment: ReturnType<typeof createFileAttachment>) => {
  setAttachments((prev) => prev.concat(attachment));
};
// Drop the attachment with the given id from the pending list.
const removeAttachment = (attachmentId: string) => {
  setAttachments((prev) => prev.filter(({ id }) => id !== attachmentId));
};
/**
 * Handle the hidden <input type="file"> change event: read each selected
 * file into a Uint8Array, wrap it in an attachment, and (for images) attach
 * a data: URL so a preview can render.
 */
const handleFileSelect = (event: Event) => {
  const input = event.currentTarget as HTMLInputElement;
  if (!input.files || input.files.length === 0) return;
  Array.from(input.files).forEach((file) => {
    const reader = new FileReader();
    reader.onload = () => {
      const buffer = reader.result instanceof ArrayBuffer ? reader.result : null;
      const data = buffer ? new Uint8Array(buffer) : undefined;
      const attachment = createFileAttachment(file.name, file.name, file.type || "application/octet-stream", data);
      // BUG FIX: readAsArrayBuffer never produces a string result, so the
      // previous `typeof reader.result === "string"` check was dead code and
      // image attachments never received a preview URL. Build a data: URL
      // from the bytes instead (equivalent to what readAsDataURL yields).
      if (file.type.startsWith("image/") && data) {
        let binary = "";
        for (let i = 0; i < data.length; i++) {
          binary += String.fromCharCode(data[i]);
        }
        attachment.url = `data:${file.type};base64,${btoa(binary)}`;
      }
      addAttachment(attachment);
    };
    reader.readAsArrayBuffer(file);
  });
  // Reset so re-selecting the same file fires another change event.
  input.value = "";
};
/**
 * Scroll the chat pane to a message's anchor element and flash a temporary
 * highlight so the user can spot it.
 */
const handleTabClick = (messageId: string) => {
  const anchorId = `message-anchor-${messageId}`;
  // CSS.escape prevents querySelector from throwing when the message id
  // contains CSS selector metacharacters (e.g. ":" or ".").
  const element = scrollContainer?.querySelector(`#${CSS.escape(anchorId)}`);
  if (element) {
    element.scrollIntoView({ behavior: "smooth", block: "center" });
    element.classList.add("message-highlight");
    // Remove the highlight after the flash animation window.
    setTimeout(() => element.classList.remove("message-highlight"), 2000);
  }
};
// ============================================================================
// RENDER (Gemini 3 Pro)
// ============================================================================
return (
<div class="absolute inset-0 flex flex-col bg-[#0a0a0b] text-zinc-300 font-sans selection:bg-indigo-500/30 overflow-hidden">
<DebugOverlay />
{/* ===== GEMINI 3 PRO HEADER ===== */}
<header class="h-12 px-2 flex items-center justify-between bg-[#0a0a0b]/90 backdrop-blur-xl border-b border-white/5 relative z-30 shrink-0 select-none">
<div class="flex items-center gap-2 overflow-hidden flex-1">
{/* Brand / Mode Indicator */}
<div class="flex items-center gap-2 px-2 py-1 rounded-md text-zinc-400">
<Layers size={14} class="text-indigo-500" />
<span class="text-[11px] font-bold tracking-wider text-zinc-300">MULTIX</span>
</div>
<div class="h-4 w-px bg-white/5 shrink-0" />
{/* Pipeline / Task Switcher */}
<div class="flex items-center gap-1 overflow-x-auto no-scrollbar mask-linear-fade">
{/* Pipeline Tab */}
<button
onClick={() => setSelectedTaskId(null)}
class={`flex items-center gap-2 px-3 py-1.5 rounded-lg text-[10px] font-bold uppercase tracking-wider transition-all border ${!selectedTaskId()
? "bg-indigo-500/10 text-indigo-400 border-indigo-500/20 shadow-[0_0_10px_rgba(99,102,241,0.1)]"
: "text-zinc-500 border-transparent hover:text-zinc-300 hover:bg-white/5"
}`}
>
<span class="font-mono">PIPELINE</span>
</button>
{/* Active Tasks */}
<For each={visibleTasks()}>
{(task) => (
<button
onClick={() => setSelectedTaskId(task.id)}
class={`group flex items-center gap-2 px-3 py-1.5 rounded-lg text-[10px] font-bold transition-all border max-w-[140px] ${selectedTaskId() === task.id
? "bg-zinc-800 text-zinc-100 border-zinc-700 shadow-lg"
: "text-zinc-500 border-transparent hover:text-zinc-300 hover:bg-white/5"
} ${task.title.toLowerCase().includes("smart fix") ? "smart-fix-highlight" : ""}`}
>
<div class={`w-1.5 h-1.5 rounded-full ${task.status === "completed" ? "bg-emerald-500" :
task.status === "interrupted" ? "bg-rose-500" :
"bg-indigo-500 animate-pulse"
}`} />
<span class="truncate">{task.title}</span>
<span
onClick={(e) => { e.stopPropagation(); handleArchiveTask(task.id); }}
class="opacity-0 group-hover:opacity-100 hover:text-red-400 transition-opacity"
>
<X size={10} />
</span>
</button>
)}
</For>
{/* New Task */}
<button
onClick={handleCreateTask}
class="w-6 h-6 flex items-center justify-center rounded-md text-zinc-600 hover:text-zinc-200 hover:bg-white/5 transition-colors"
>
<Plus size={14} />
</button>
</div>
</div>
{/* Right Actions */}
<div class="flex items-center gap-2 shrink-0 pl-4">
{/* Stream Status */}
<Show when={isAgentThinking()}>
<div class="flex items-center gap-2 px-2 py-1 rounded-full bg-violet-500/10 border border-violet-500/20">
<Loader2 size={10} class="animate-spin text-violet-400" />
<span class="text-[9px] font-mono text-violet-300">{formatTokenTotal(cachedTokensUsed())}</span>
</div>
</Show>
<div class="h-4 w-px bg-white/5" />
{/* Tools */}
<button
onClick={handleCompact}
disabled={!selectedTaskId()}
class="p-1.5 text-zinc-500 hover:text-zinc-200 hover:bg-white/5 rounded-md transition-colors disabled:opacity-30"
title="Compact Context"
>
<FileArchive size={14} />
</button>
<button
onClick={() => emitSessionSidebarRequest({ instanceId: props.instanceId, action: "show-skills" })}
class="p-1.5 text-zinc-500 hover:text-indigo-300 hover:bg-indigo-500/10 rounded-md transition-colors"
title="Skills"
>
<Sparkles size={14} />
</button>
</div>
</header>
{/* ===== AGENT/MODEL SELECTORS (LITE VERSIONS - PER TASK) ===== */}
<Show when={getSelectedTask()}>
<div class="px-4 py-3 border-b border-white/5 bg-[#0a0a0b]">
<div class="grid grid-cols-1 md:grid-cols-2 gap-2">
<LiteAgentSelector
instanceId={props.instanceId}
sessionId={getActiveTaskSessionId()}
currentAgent={cachedAgent()}
onAgentChange={(agent) => {
// Update the TASK's session, not a global cache
const taskSessionId = getActiveTaskSessionId();
log.info("[MultiX] Changing agent for task session", { taskSessionId, agent });
updateSessionAgent(props.instanceId, taskSessionId, agent);
// Force immediate sync to reflect the change
setTimeout(() => syncFromStore(), 50);
}}
/>
<LiteModelSelector
instanceId={props.instanceId}
sessionId={getActiveTaskSessionId()}
currentModel={{ providerId: cachedProviderId(), modelId: cachedModelId() }}
onModelChange={(model) => {
// Update the TASK's session, not a global cache
const taskSessionId = getActiveTaskSessionId();
log.info("[MultiX] Changing model for task session", { taskSessionId, model });
// Immediately update cached values for responsive UI
setCachedModelId(model.modelId);
setCachedProviderId(model.providerId);
updateSessionModelForSession(props.instanceId, taskSessionId, model);
// Force immediate sync to reflect the change
setTimeout(() => syncFromStore(), 50);
}}
/>
<LiteSkillsSelector
instanceId={props.instanceId}
sessionId={getActiveTaskSessionId()}
/>
</div>
</div>
</Show>
{/* ===== MAIN CONTENT AREA (Row Layout) ===== */}
<div class="flex-1 flex flex-row min-h-0 relative bg-[#050505] overflow-hidden w-full h-full">
{/* Chat Column */}
<div class="flex-1 min-h-0 flex flex-col overflow-hidden relative">
<div ref={scrollContainer} class="flex-1 min-h-0 overflow-y-auto overflow-x-hidden custom-scrollbar">
{/* Compaction Suggestion Banner */}
<Show when={compactionSuggestion()}>
<div class="mx-3 mt-3 mb-1 rounded-xl border border-emerald-500/30 bg-emerald-500/10 px-3 py-2 text-[11px] text-emerald-200 flex items-center justify-between gap-3">
<span class="font-semibold">Compact suggested: {compactionSuggestion()?.reason}</span>
<button
type="button"
class="px-2.5 py-1 rounded-lg text-[10px] font-bold uppercase tracking-wide bg-emerald-500/20 border border-emerald-500/40 text-emerald-200 hover:bg-emerald-500/30 transition-colors"
onClick={handleCompact}
>
Compact now
</button>
</div>
</Show>
<Show when={!selectedTaskId()} fallback={
/* Message List - Using full MessageBlockList for proper streaming */
<div class="min-h-full pb-4">
<MessageBlockList
instanceId={props.instanceId}
sessionId={getActiveTaskSessionId()}
store={getMessageStore}
messageIds={() => messageIds()}
lastAssistantIndex={() => lastAssistantIndex()}
showThinking={() => true}
thinkingDefaultExpanded={() => true}
showUsageMetrics={() => true}
scrollContainer={() => scrollContainer}
setBottomSentinel={setBottomSentinel}
/>
{/* Bottom anchor */}
<div id="bottom-anchor" class="h-10 w-full" />
</div>
}>
{/* Pipeline View */}
<PipelineView
visibleTasks={visibleTasks}
onTaskClick={setSelectedTaskId}
onArchiveTask={handleArchiveTask}
/>
</Show>
</div>
{/* ===== INPUT AREA ===== */}
<div class="p-4 bg-[#0a0a0b] border-t border-white/5 shrink-0 z-20">
{/* Input Container */}
<div class="w-full bg-zinc-900/50 border border-white/10 rounded-2xl shadow-sm overflow-hidden focus-within:border-indigo-500/30 transition-all">
{/* Input Header Row */}
<div class="flex items-center justify-between px-3 pt-2 pb-1">
<div class="flex items-center space-x-2">
<div class="flex flex-col">
<span class="text-[10px] font-bold text-zinc-400 uppercase tracking-wide">
{selectedTaskId() ? "Task Context" : "Global Pipeline"}
</span>
</div>
</div>
<div class="flex items-center space-x-1">
{/* APEX / Shield Toggles */}
<button
onClick={() => toggleApex(props.instanceId)}
title="Apex"
class={`p-1 rounded transition-colors ${soloState().isApex ? "text-rose-400 bg-rose-500/10" : "text-zinc-600 hover:text-zinc-400"}`}
>
<Zap size={10} />
</button>
<button
onClick={() => toggleAutoApproval(props.instanceId)}
title="Shield"
class={`p-1 rounded transition-colors ${soloState().autoApproval ? "text-emerald-400 bg-emerald-500/10" : "text-zinc-600 hover:text-zinc-400"}`}
>
<Shield size={10} />
</button>
</div>
</div>
{/* Attachments */}
<Show when={attachments().length > 0}>
<div class="flex flex-wrap gap-1.5 px-3 py-1">
<For each={attachments()}>
{(attachment) => (
<AttachmentChip
attachment={attachment}
onRemove={() => removeAttachment(attachment.id)}
/>
)}
</For>
</div>
</Show>
{/* Text Input */}
<textarea
value={chatInput()}
onInput={(e) => {
setChatInput(e.currentTarget.value);
e.currentTarget.style.height = "auto";
e.currentTarget.style.height = e.currentTarget.scrollHeight + "px";
}}
onKeyDown={handleKeyDown}
placeholder={selectedTaskId() ? "Message agent..." : "Start a new task..."}
class="w-full bg-transparent text-zinc-200 placeholder-zinc-500 text-sm p-3 outline-none resize-none max-h-[300px] min-h-[44px]"
rows={1}
disabled={isSending()}
/>
{/* Toolbar */}
<div class="flex items-center justify-between px-2 pb-2 mt-1 border-t border-white/5 pt-2 bg-zinc-900/30">
<div class="flex items-center space-x-1">
<input
ref={fileInputRef}
type="file"
multiple
class="hidden"
onChange={handleFileSelect}
/>
<button
onClick={() => fileInputRef?.click()}
class="p-1.5 text-zinc-500 hover:text-zinc-300 rounded hover:bg-white/5 transition-colors"
>
<Paperclip size={14} />
</button>
<button
onClick={handleEnhancePrompt}
disabled={!chatInput().trim() || isEnhancing()}
class={`p-1.5 rounded hover:bg-white/5 transition-colors ${isEnhancing() ? "text-amber-400 animate-pulse" : "text-zinc-500 hover:text-amber-300"}`}
>
<Wand2 size={14} class={isEnhancing() ? "animate-spin" : ""} />
</button>
</div>
<div class="flex items-center space-x-2">
<div class="text-[9px] text-zinc-600 font-mono hidden md:block">
{cachedModelId()}
</div>
{/* Stop Button (visible when agent is thinking) */}
<Show when={isAgentThinking() || isSending()}>
<button
onClick={handleStopAgent}
class="p-1.5 bg-rose-500/20 hover:bg-rose-500/30 text-rose-400 border border-rose-500/30 rounded-lg transition-all shadow-sm"
title="Stop Agent (Shift+Click = Force Reset)"
>
<StopCircle size={14} strokeWidth={2.5} />
</button>
</Show>
{/* Send Button */}
<button
onClick={handleSendMessage}
disabled={(!chatInput().trim() && attachments().length === 0) || isSending()}
class="p-1.5 bg-zinc-100 hover:bg-white text-black rounded-lg disabled:opacity-50 disabled:cursor-not-allowed transition-all shadow-sm"
>
<Show when={isSending()} fallback={<ArrowUp size={14} strokeWidth={3} />}>
<Loader2 size={14} class="animate-spin" />
</Show>
</button>
</div>
</div>
</div>
</div>
</div>
{/* Sidebar (Right) */}
<Show when={selectedTaskId() && messageIds().length > 0}>
<MessageNavSidebar
messageIds={messageIds}
store={getMessageStore}
scrollContainer={scrollContainer}
onTabClick={handleTabClick}
/>
</Show>
</div>
</div>
);
}

View File

@@ -0,0 +1,100 @@
import { createSignal, onMount, onCleanup, Show } from "solid-js"
// Simple debug log storage (no reactive overhead)
// Intentional no-op: the log sink was disabled for performance, but the
// export is kept so existing call sites do not need to change.
export function addDebugLog(_message: string, _level: "info" | "warn" | "error" = "info") {
  // Disabled - no-op for performance
}
// HARD STOP function - forces page reload
// Last-resort recovery: a full reload wipes all in-memory UI state.
function hardStop() {
  console.warn("HARD STOP triggered - reloading page")
  window.location.reload()
}
// Force reset function import placeholder
// Module-level hook: the owning component registers its reset callback here
// so the overlay's "RESET UI" button can invoke it without a circular import.
let forceResetFn: (() => void) | null = null
// Register the callback used by the overlay's "RESET UI" button.
export function setForceResetFn(fn: () => void) {
  forceResetFn = fn
}
/**
 * Floating debug panel, toggled with Ctrl+Shift+D.
 * Offers two recovery actions for a stuck UI: a soft reset via the callback
 * registered through setForceResetFn, and a hard page reload.
 */
export function DebugOverlay() {
  // Hidden by default; only the keyboard shortcut reveals it.
  const [visible, setVisible] = createSignal(false)
  // Toggle with Ctrl+Shift+D
  onMount(() => {
    const handleKeyDown = (e: KeyboardEvent) => {
      // "D" is uppercase here because Shift is held while the key fires.
      if (e.ctrlKey && e.shiftKey && e.key === "D") {
        setVisible((v) => !v)
      }
    }
    window.addEventListener("keydown", handleKeyDown)
    onCleanup(() => window.removeEventListener("keydown", handleKeyDown))
  })
  return (
    <Show when={visible()}>
      {/* Fixed top-right panel, above everything else in the app. */}
      <div
        style={{
          position: "fixed",
          top: "10px",
          right: "10px",
          "z-index": "99999",
          background: "rgba(0,0,0,0.9)",
          color: "#fff",
          padding: "12px",
          "border-radius": "8px",
          "font-family": "monospace",
          "font-size": "11px",
          "min-width": "200px",
          border: "1px solid #333",
          "pointer-events": "auto",
        }}
      >
        <div style={{ "margin-bottom": "8px", "font-weight": "bold" }}>
          DEBUG PANEL (Ctrl+Shift+D to toggle)
        </div>
        <div style={{ display: "flex", gap: "8px" }}>
          {/* Soft reset: invokes the externally registered reset callback,
              if one has been provided via setForceResetFn. */}
          <button
            onClick={() => {
              if (forceResetFn) forceResetFn()
            }}
            style={{
              background: "#f59e0b",
              color: "#000",
              border: "none",
              padding: "6px 12px",
              "border-radius": "4px",
              cursor: "pointer",
              "font-weight": "bold",
              "font-size": "10px",
            }}
          >
            RESET UI
          </button>
          {/* Hard reset: full page reload via hardStop. */}
          <button
            onClick={hardStop}
            style={{
              background: "#ef4444",
              color: "#fff",
              border: "none",
              padding: "6px 12px",
              "border-radius": "4px",
              cursor: "pointer",
              "font-weight": "bold",
              "font-size": "10px",
            }}
          >
            HARD RELOAD
          </button>
        </div>
        <div style={{ "margin-top": "8px", "font-size": "9px", color: "#888" }}>
          If stuck: Click HARD RELOAD or press F5
        </div>
      </div>
    </Show>
  )
}

View File

@@ -295,7 +295,7 @@ const FolderSelectionView: Component<FolderSelectionViewProps> = (props) => {
<img src={nomadArchLogo} alt="NomadArch logo" class="h-32 w-auto sm:h-48" loading="lazy" />
</div>
<h1 class="mb-2 text-3xl font-semibold text-primary">NomadArch</h1>
<p class="text-xs text-muted mb-1">Forked from OpenCode</p>
<p class="text-xs text-muted mb-1">An enhanced fork of CodeNomad</p>
<Show when={activeUser()}>
{(user) => (
<p class="text-xs text-muted mb-1">

View File

@@ -10,6 +10,7 @@ import {
type Accessor,
type Component,
} from "solid-js"
import toast from "solid-toast"
import type { ToolState } from "@opencode-ai/sdk"
import { Accordion } from "@kobalte/core"
import { ChevronDown } from "lucide-solid"
@@ -35,9 +36,13 @@ import {
getSessionInfo,
sessions,
setActiveSession,
setActiveParentSession,
executeCustomCommand,
sendMessage,
runShellCommand,
} from "../../stores/sessions"
import { compactSession } from "../../stores/session-actions";
import { addTask, setActiveTask } from "../../stores/task-actions"
import { keyboardRegistry, type KeyboardShortcut } from "../../lib/keyboard-registry"
import { messageStoreBus } from "../../stores/message-v2/bus"
import { clearSessionRenderCache } from "../message-block"
@@ -54,14 +59,15 @@ import ModelSelector from "../model-selector"
import ModelStatusSelector from "../model-status-selector"
import CommandPalette from "../command-palette"
import Kbd from "../kbd"
import MultiTaskChat from "../chat/multi-task-chat"
// Using rebuilt MultiX v2 with polling architecture (no freeze)
import MultiTaskChat from "../chat/multix-v2"
import { TodoListView } from "../tool-call/renderers/todo"
import ContextUsagePanel from "../session/context-usage-panel"
import SessionView from "../session/session-view"
import { Sidebar, type FileNode } from "./sidebar"
import { Editor } from "./editor"
import { serverApi } from "../../lib/api-client"
import { Sparkles, Layout as LayoutIcon, Terminal as TerminalIcon, Search, Loader2, Zap, Shield, Settings } from "lucide-solid"
import { Sparkles, Layout as LayoutIcon, Terminal as TerminalIcon, Search, Loader2, Zap, Shield, Settings, FileArchive, ArrowLeft } from "lucide-solid"
import { formatTokenTotal } from "../../lib/formatters"
import { sseManager } from "../../lib/sse-manager"
import { getLogger } from "../../lib/logger"
@@ -159,18 +165,32 @@ const InstanceShell2: Component<InstanceShellProps> = (props) => {
const [selectedBinary, setSelectedBinary] = createSignal("opencode")
// Handler to load file content when selected
createEffect(() => {
if (typeof window !== "undefined") {
(window as any).ACTIVE_INSTANCE_ID = props.instance.id;
}
});
const handleFileSelect = async (file: FileNode) => {
try {
const response = await serverApi.readWorkspaceFile(props.instance.id, file.path)
const language = file.name.split('.').pop() || 'text'
setCurrentFile({
const updatedFile = {
...file,
content: response.contents,
language,
})
}
setCurrentFile(updatedFile)
// If it's a previewable file, update the preview URL
if (file.name.endsWith('.html') || file.name.endsWith('.htm')) {
const origin = typeof window !== "undefined" ? window.location.origin : "http://localhost:3000"
const apiOrigin = origin.replace(":3000", ":9898")
const url = `${apiOrigin}/api/workspaces/${props.instance.id}/serve/${file.path}`
setPreviewUrl(url)
}
} catch (error) {
log.error('Failed to read file content', error)
// Still show the file but without content
setCurrentFile(file)
}
}
@@ -292,21 +312,55 @@ const InstanceShell2: Component<InstanceShellProps> = (props) => {
if (typeof window === "undefined") return
const handler = async (event: Event) => {
const detail = (event as CustomEvent<{ url?: string; instanceId?: string }>).detail
console.log(`[InstanceShell2] Received BUILD_PREVIEW_EVENT`, {
detail,
currentInstanceId: props.instance.id,
match: detail?.instanceId === props.instance.id
});
if (!detail || detail.instanceId !== props.instance.id || !detail.url) return
setPreviewUrl(detail.url)
const confirmed = await showConfirmDialog(`Preview available at ${detail.url}. Open now?`, {
title: "Preview ready",
confirmLabel: "Open preview",
cancelLabel: "Later",
})
if (confirmed) {
// Auto-switch to preview mode for new AI content
setCenterTab("preview")
}
toast.success("Preview updated", {
icon: '🚀',
duration: 3000,
position: 'bottom-center'
})
}
window.addEventListener(BUILD_PREVIEW_EVENT, handler)
onCleanup(() => window.removeEventListener(BUILD_PREVIEW_EVENT, handler))
})
onMount(() => {
if (typeof window === "undefined") return
const handler = async (event: Event) => {
const detail = (event as CustomEvent<{ code: string; fileName: string | null; instanceId: string }>).detail
if (!detail || detail.instanceId !== props.instance.id) return
if (detail.fileName) {
const origin = window.location.origin
const apiOrigin = origin.includes(":3000") ? origin.replace(":3000", ":9898") : origin
const url = `${apiOrigin}/api/workspaces/${props.instance.id}/serve/${detail.fileName}`
setPreviewUrl(url)
} else {
const blob = new Blob([detail.code], { type: 'text/html' })
const url = URL.createObjectURL(blob)
setPreviewUrl(url)
}
setCenterTab("preview")
toast.success("Previewing code block", {
icon: '🔍',
duration: 2000,
position: 'bottom-center'
})
}
window.addEventListener("MANUAL_PREVIEW_EVENT", handler)
onCleanup(() => window.removeEventListener("MANUAL_PREVIEW_EVENT", handler))
})
createEffect(() => {
if (typeof window === "undefined") return
window.localStorage.setItem(LEFT_DRAWER_STORAGE_KEY, sessionSidebarWidth().toString())
@@ -402,23 +456,90 @@ const InstanceShell2: Component<InstanceShellProps> = (props) => {
showCommandPalette(props.instance.id)
}
/* Compact Logic */
// Tracks an in-flight compaction so the toolbar button can disable itself.
const [isCompacting, setIsCompacting] = createSignal(false);
// Compact (summarize) the active session's history to reduce token usage.
// No-ops for the pseudo "info" session or while a compaction is running.
const handleCompact = async () => {
  const sessionId = activeSessionIdForInstance();
  if (!sessionId || sessionId === "info" || isCompacting()) return;
  setIsCompacting(true);
  // The same toast id is reused so the loading toast morphs into the result.
  const toastId = toast.loading("Compacting...", { icon: <FileArchive class="animate-pulse text-indigo-400" /> });
  try {
    await compactSession(props.instance.id, sessionId);
    toast.success("Session compacted!", { id: toastId });
  } catch (e) {
    toast.error("Failed to compact", { id: toastId });
  } finally {
    setIsCompacting(false);
  }
}
const [isFixing, setIsFixing] = createSignal(false)
const [isBuilding, setIsBuilding] = createSignal(false)
const handleSmartFix = async () => {
const sessionId = activeSessionIdForInstance()
if (!sessionId || sessionId === "info" || isFixing()) {
const parentSessionId = activeSessionIdForInstance()
if (!parentSessionId || parentSessionId === "info" || isFixing()) {
return
}
setIsFixing(true)
const toastId = toast.loading("Smart Fix: Creating analysis task...", {
icon: <Sparkles class="text-indigo-400 animate-spin" />
});
try {
// Smart Fix targets the active task if available, otherwise general fix
const session = activeSessionForInstance()
const activeTaskId = session?.activeTaskId
const args = activeTaskId ? `task:${activeTaskId}` : ""
// ALWAYS create a dedicated "Smart Fix" task in the MultiX pipeline
// This ensures the analysis and fixes appear in their own tab
const timestamp = new Date().toLocaleTimeString('en-US', { hour: '2-digit', minute: '2-digit' })
const taskResult = await addTask(
props.instance.id,
parentSessionId,
`🔧 Smart Fix ${timestamp}`
)
await executeCustomCommand(props.instance.id, sessionId, "fix", args)
const targetSessionId = taskResult.taskSessionId || parentSessionId
const taskId = taskResult.id
// Set this as the active task so the user sees it immediately
setActiveTask(props.instance.id, parentSessionId, taskId)
toast.loading("Analyzing project...", { id: toastId });
// Use sendMessage to force visible feedback in the chat stream
// Prompt enforces: Report → Plan → Approval → Execute workflow
const smartFixPrompt = `**Smart Fix Analysis Request**
Please analyze this project for errors, bugs, warnings, or potential improvements.
**Your response MUST follow this exact format:**
1. **ANALYSIS RESULTS:**
- If NO errors/issues found: Clearly state "✅ No errors or issues detected in the project."
- If errors/issues ARE found: List each issue with file path and line number if applicable.
2. **FIX PLAN (only if issues found):**
For each issue, outline:
- What the problem is
- How you will fix it
- Which files will be modified
3. **AWAIT APPROVAL:**
After presenting the plan, explicitly ask: "Do you approve this fix plan? Reply 'yes' to proceed, or provide feedback for adjustments."
4. **EXECUTION (only after I say 'yes'):**
Only apply fixes after receiving explicit approval. Use write_file tool to make changes.
Now analyze the project and report your findings.`
await sendMessage(
props.instance.id,
targetSessionId,
smartFixPrompt,
[],
taskId
)
toast.success("Smart Fix task created. Check the pipeline.", { id: toastId, duration: 3000 });
// Auto-open right panel to show agent progress if it's not open
if (!rightOpen()) {
@@ -427,6 +548,7 @@ const InstanceShell2: Component<InstanceShellProps> = (props) => {
}
} catch (error) {
log.error("Failed to run Smart Fix command", error)
toast.error("Smart Fix failed to start", { id: toastId });
} finally {
setTimeout(() => setIsFixing(false), 2000) // Reset after delay
}
@@ -562,7 +684,25 @@ const InstanceShell2: Component<InstanceShellProps> = (props) => {
})
// Route a sidebar session selection to the right activation call:
// the pseudo "info" session activates directly; a child (task) session
// activates its parent and then focuses the matching task tab; a plain
// parent session becomes the active parent itself.
const handleSessionSelect = (sessionId: string) => {
  if (sessionId === "info") {
    setActiveSession(props.instance.id, sessionId)
    return
  }
  const instanceSessions = sessions().get(props.instance.id)
  const session = instanceSessions?.get(sessionId)
  if (session?.parentId) {
    // Child session: surface its parent, then select the task that owns it.
    setActiveParentSession(props.instance.id, session.parentId)
    const parentSession = instanceSessions?.get(session.parentId)
    const matchingTask = parentSession?.tasks?.find((task) => task.taskSessionId === sessionId)
    if (matchingTask) {
      setActiveTask(props.instance.id, session.parentId, matchingTask.id)
    }
    return
  }
  setActiveParentSession(props.instance.id, sessionId)
}
@@ -610,6 +750,7 @@ const InstanceShell2: Component<InstanceShellProps> = (props) => {
const sessionsMap = activeSessions()
const parentId = parentSessionIdForInstance()
const activeId = activeSessionIdForInstance()
const instanceSessions = sessions().get(props.instance.id)
setCachedSessionIds((current) => {
const next: string[] = []
const append = (id: string | null) => {
@@ -622,6 +763,16 @@ const InstanceShell2: Component<InstanceShellProps> = (props) => {
append(parentId)
append(activeId)
const parentSessionId = parentId || activeId
const parentSession = parentSessionId ? instanceSessions?.get(parentSessionId) : undefined
const activeTaskId = parentSession?.activeTaskId
if (activeTaskId && parentSession?.tasks?.length) {
const activeTask = parentSession.tasks.find((task) => task.id === activeTaskId)
if (activeTask?.taskSessionId) {
append(activeTask.taskSessionId)
}
}
const limit = parentId ? SESSION_CACHE_LIMIT + 1 : SESSION_CACHE_LIMIT
const trimmed = next.length > limit ? next.slice(0, limit) : next
const trimmedSet = new Set(trimmed)
@@ -1180,7 +1331,7 @@ const InstanceShell2: Component<InstanceShellProps> = (props) => {
const sessionLayout = (
<div
class="session-shell-panels flex flex-col flex-1 min-h-0 overflow-x-hidden relative bg-[#050505]"
class="session-shell-panels flex flex-col flex-1 min-h-0 w-full overflow-hidden relative bg-[#050505]"
ref={(element) => {
setDrawerHost(element)
measureDrawerHost()
@@ -1190,8 +1341,8 @@ const InstanceShell2: Component<InstanceShellProps> = (props) => {
<div class="absolute top-[-10%] left-[-10%] w-[40%] h-[40%] bg-blue-600/10 blur-[120px] rounded-full pointer-events-none z-0" />
<div class="absolute bottom-[-10%] right-[-10%] w-[30%] h-[30%] bg-purple-600/5 blur-[100px] rounded-full pointer-events-none z-0" />
<AppBar position="sticky" color="default" elevation={0} class="border-b border-white/5 bg-[#050505]/80 backdrop-blur-md z-20">
<Toolbar variant="dense" class="session-toolbar flex flex-wrap items-center justify-between gap-2 py-0 min-h-[40px]">
<AppBar position="sticky" color="default" elevation={0} class="border-b border-white/5 bg-[#050505]/80 backdrop-blur-md z-20 shrink-0">
<Toolbar variant="dense" class="session-toolbar flex items-center justify-between gap-2 py-0 min-h-[48px]">
<div class="flex items-center space-x-4">
<IconButton
ref={setLeftToggleButtonEl}
@@ -1221,6 +1372,27 @@ const InstanceShell2: Component<InstanceShellProps> = (props) => {
<div class="flex items-center space-x-4">
<Show when={activeSessionIdForInstance() && activeSessionIdForInstance() !== "info"}>
<div class="flex items-center space-x-2">
<button
onClick={() => props.onCloseSession(activeSessionIdForInstance()!)}
class="flex items-center gap-1.5 px-2.5 py-1 text-[11px] font-semibold text-zinc-400 hover:text-white hover:bg-white/10 border border-transparent hover:border-white/10 transition-all rounded-full"
title="Back to Sessions"
>
<ArrowLeft size={14} strokeWidth={2} />
<span>Back</span>
</button>
{/* Compact Button */}
<button
onClick={handleCompact}
disabled={isCompacting()}
class="flex items-center gap-1.5 px-2.5 py-1 text-[11px] font-semibold text-cyan-400 bg-cyan-500/10 border border-cyan-500/20 hover:bg-cyan-500/20 hover:border-cyan-500/40 transition-all rounded-full"
title="Compact Context: Summarize conversation to save tokens"
>
<Show when={isCompacting()} fallback={<FileArchive size={14} strokeWidth={2} />}>
<Loader2 size={14} class="animate-spin" />
</Show>
<span>Compact</span>
</button>
<ModelStatusSelector
instanceId={props.instance.id}
sessionId={activeSessionIdForInstance()!}
@@ -1246,14 +1418,10 @@ const InstanceShell2: Component<InstanceShellProps> = (props) => {
onClick={handleSmartFix}
disabled={isFixing()}
title="Smart Fix: Automatically detect and fix issues in your code"
class={`transition-all flex items-center space-x-1.5 px-2 py-1 rounded-full hover:bg-white/10 ${isFixing() ? "text-blue-500" : "text-zinc-400 hover:text-white"}`}
class={`transition-all flex items-center space-x-1.5 px-3 py-1 rounded-full text-[10px] font-bold uppercase tracking-tight ${isFixing() ? "text-blue-500 smart-fix-highlight bg-blue-500/10" : "text-zinc-400 hover:text-white hover:bg-white/5"}`}
>
<Show when={isFixing()} fallback={<Sparkles size={14} class="text-blue-400" />}>
<Loader2 size={14} class="animate-spin text-blue-400" />
</Show>
<span class="text-[10px] font-bold uppercase tracking-tight">
{isFixing() ? "FIXING..." : "SMART FIX"}
</span>
<Zap size={12} class={isFixing() ? "animate-bounce" : ""} />
<span>Fix</span>
</button>
<div class="w-px h-3 bg-white/10" />
<button
@@ -1303,11 +1471,11 @@ const InstanceShell2: Component<InstanceShellProps> = (props) => {
setRightOpen(newState)
setIsSoloOpen(newState)
}}
class={`flex items-center space-x-1.5 px-3 py-1 rounded-full text-[11px] font-bold transition-all ${(rightOpen() && isSoloOpen()) ? 'bg-blue-600/20 text-blue-400 border border-blue-500/30' : 'bg-white/5 text-zinc-400 border border-white/5'
class={`flex items-center space-x-1.5 px-3 py-1 rounded-full text-[10px] font-bold uppercase tracking-tight transition-all ${(rightOpen() && isSoloOpen()) ? 'bg-blue-600/20 text-blue-400 border border-blue-500/30' : 'bg-white/5 text-zinc-400 border border-white/5'
}`}
>
<span class={`w-1.5 h-1.5 bg-current rounded-full ${(rightOpen() && isSoloOpen()) ? 'animate-pulse' : ''}`} />
<span>MULTIX MODE</span>
<LayoutIcon size={12} />
<span>MultiX</span>
</button>
<IconButton
ref={setRightToggleButtonEl}
@@ -1323,38 +1491,35 @@ const InstanceShell2: Component<InstanceShellProps> = (props) => {
</Toolbar>
</AppBar>
<Box sx={{ display: "flex", flex: 1, minHeight: 0, overflowX: "hidden", position: "relative", zIndex: 10 }}>
<Box sx={{ display: "flex", flex: 1, minHeight: 0, width: "100%", overflow: "hidden", position: "relative", zIndex: 10 }}>
{renderLeftPanel()}
<Box
component="main"
sx={{ flexGrow: 1, minHeight: 0, display: "flex", flexDirection: "column", overflowX: "hidden" }}
class="content-area relative"
component="div"
sx={{ flexGrow: 1, minHeight: 0, display: "flex", flexDirection: "column", overflow: "hidden" }}
class="content-area relative bg-[#050505]"
>
<div class="flex-1 flex overflow-hidden min-h-0">
<Show when={!isPhoneLayout()}>
<div class="flex-1 flex flex-col min-h-0 bg-[#0d0d0d]">
<div class="h-10 glass border-b border-white/5 flex items-center justify-between px-4 shrink-0">
<div class="flex items-center gap-2">
{/* Main workspace area */}
<div class="flex-1 flex flex-row min-h-0 w-full overflow-hidden">
{/* Center Area (Editor/Preview) */}
<div class="flex-1 flex flex-col min-h-0 bg-[#0d0d0d] overflow-hidden">
<div class="flex items-center justify-between px-4 py-2 border-b border-white/5 bg-[#111112]">
<div class="flex items-center space-x-4">
<button
type="button"
onClick={() => setCenterTab("code")}
class={`px-2.5 py-1 rounded-md text-[11px] font-semibold uppercase tracking-wide border ${centerTab() === "code"
? "bg-white/10 border-white/20 text-white"
: "border-transparent text-zinc-400 hover:text-zinc-200 hover:bg-white/5"
}`}
onClick={() => setCenterTab("code")}
>
Code
</button>
<button
type="button"
onClick={() => setCenterTab("preview")}
class={`px-2.5 py-1 rounded-md text-[11px] font-semibold uppercase tracking-wide border ${centerTab() === "preview"
? "bg-white/10 border-white/20 text-white"
: "border-transparent text-zinc-400 hover:text-zinc-200 hover:bg-white/5"
}`}
onClick={() => setCenterTab("preview")}
disabled={!previewUrl()}
title={previewUrl() || "Run build to enable preview"}
>
Preview
</button>
@@ -1367,102 +1532,26 @@ const InstanceShell2: Component<InstanceShellProps> = (props) => {
)}
</Show>
</div>
<Show when={centerTab() === "preview"} fallback={<Editor file={currentFile()} />}>
<Show
when={previewUrl()}
fallback={
<div class="flex-1 flex items-center justify-center text-zinc-500">
<div class="text-center">
<p>No preview available yet.</p>
<p class="text-sm mt-2 opacity-60">Run build to detect a preview URL.</p>
</div>
</div>
}
>
{(url) => (
<div class="flex-1 min-h-0 bg-white">
<iframe
class="flex-1 w-full h-full border-none bg-black"
src={url()}
title="App Preview"
sandbox="allow-scripts allow-same-origin allow-forms allow-pointer-lock allow-popups"
/>
)}
</Show>
</Show>
</div>
</Show>
<div
class="flex flex-col relative border-l border-white/5 min-h-0 overflow-hidden min-w-0"
style={{
width: isPhoneLayout() ? "100%" : `${chatPanelWidth()}px`,
"flex-shrink": isPhoneLayout() ? 1 : 0,
}}
>
<div
class="absolute -left-1 top-0 bottom-0 w-2 cursor-col-resize z-20 hover:bg-white/5 active:bg-white/10 transition-colors"
onMouseDown={handleResizeMouseDown("chat")}
/>
<Show when={isSoloOpen()}>
<div class="flex-1 flex flex-col min-h-0 relative">
<MultiTaskChat instanceId={props.instance.id} sessionId={activeSessionIdForInstance() || ""} />
</div>
</Show>
<div class="flex-1 flex flex-col relative min-h-0"
style={{ display: isSoloOpen() ? "none" : "flex" }}>
<Show
when={showingInfoView()}
fallback={
<Show
when={cachedSessionIds().length > 0 && activeSessionIdForInstance()}
fallback={
<div class="flex items-center justify-center h-full">
<div class="text-center text-zinc-500">
<p class="mb-2">No session selected</p>
<p class="text-sm">Select a session to view messages</p>
</div>
</div>
}
>
<For each={cachedSessionIds()}>
{(sessionId) => {
const isActive = () => activeSessionIdForInstance() === sessionId
return (
<div
class="session-cache-pane flex flex-col flex-1 min-h-0"
style={{ display: isActive() ? "flex" : "none" }}
data-session-id={sessionId}
aria-hidden={!isActive()}
>
<SessionView
sessionId={sessionId}
activeSessions={activeSessions()}
instanceId={props.instance.id}
instanceFolder={props.instance.folder}
escapeInDebounce={props.escapeInDebounce}
showSidebarToggle={showEmbeddedSidebarToggle()}
onSidebarToggle={() => setLeftOpen(true)}
forceCompactStatusLayout={showEmbeddedSidebarToggle()}
isActive={isActive()}
src={previewUrl() || "about:blank"}
class="w-full h-full border-none"
title="Preview"
/>
</div>
)
}}
</For>
</Show>
}
>
<div class="info-view-pane flex flex-col flex-1 min-h-0 overflow-y-auto">
<InfoView instanceId={props.instance.id} />
</div>
</Show>
</div>
</div>
</div>
{/* Bottom Toolbar/Terminal Area */}
{/* Bottom Toolbar/Terminal Area */}
{/* Right Panel (MultiX Chat) */}
<Show when={rightOpen() && isSoloOpen()}>
<div class="flex flex-col relative border-l border-white/5 min-h-0 overflow-hidden" style={{ width: `${chatPanelWidth()}px`, "flex-shrink": 0 }}>
<MultiTaskChat instanceId={props.instance.id} sessionId={activeSessionIdForInstance()!} />
</div>
</Show>
</div>
<div
class="flex flex-col border-t border-white/5 relative bg-[#09090b] z-10 shrink-0 overflow-hidden"
style={{
@@ -1502,23 +1591,12 @@ const InstanceShell2: Component<InstanceShellProps> = (props) => {
<span class="w-1.5 h-1.5 rounded-full bg-green-500 shadow-[0_0_5px_rgba(34,197,94,0.5)]" />
<span>Sync Active</span>
</div>
<Show when={activeSessionForInstance()}>
{(session) => (
<>
<span class="hover:text-zinc-300 cursor-pointer">{session().model.modelId}</span>
<span class="hover:text-zinc-300 cursor-pointer">{session().agent}</span>
</>
)}
</Show>
</div>
</footer>
</div>
</Box>
{renderRightPanel()}
</Box>
{/* Floating Action Buttons removed - Integrated into Header */}
</div>
)

View File

@@ -7,6 +7,8 @@ import {
Settings,
Plug,
Sparkles,
RefreshCw,
Download,
ChevronRight,
ChevronDown,
Folder,
@@ -21,6 +23,7 @@ import InstanceServiceStatus from "../instance-service-status"
import McpManager from "../mcp-manager"
import { catalog, catalogLoading, catalogError, loadCatalog } from "../../stores/skills"
import { getSessionSkills, setSessionSkills } from "../../stores/session-state"
import { syncSessionsFromSdk } from "../../stores/session-api"
export interface FileNode {
name: string
@@ -132,6 +135,7 @@ export const Sidebar: Component<SidebarProps> = (props) => {
const [rootFiles, setRootFiles] = createSignal<FileNode[]>([])
const [lastRequestedTab, setLastRequestedTab] = createSignal<string | null>(null)
const [searchQuery, setSearchQuery] = createSignal("")
const [syncing, setSyncing] = createSignal(false)
const [searchResults, setSearchResults] = createSignal<FileNode[]>([])
const [searchLoading, setSearchLoading] = createSignal(false)
const [gitStatus, setGitStatus] = createSignal<{
@@ -173,6 +177,11 @@ export const Sidebar: Component<SidebarProps> = (props) => {
if (typeof window === "undefined") return
const handler = (event: Event) => {
const detail = (event as CustomEvent<{ instanceId?: string }>).detail
console.log(`[Sidebar] Received FILE_CHANGE_EVENT`, {
detail,
currentInstanceId: props.instanceId,
match: detail?.instanceId === props.instanceId
});
if (!detail || detail.instanceId !== props.instanceId) return
void refreshRootFiles()
}
@@ -317,6 +326,25 @@ export const Sidebar: Component<SidebarProps> = (props) => {
</Show>
<Show when={activeTab() === "sessions"}>
<div class="flex flex-col gap-1">
<div class="px-2 mb-2">
<button
onClick={async () => {
setSyncing(true)
try {
await syncSessionsFromSdk(props.instanceId)
} finally {
setSyncing(false)
}
}}
disabled={syncing()}
class="w-full flex items-center justify-center gap-2 px-3 py-2 text-xs font-semibold uppercase tracking-wide rounded-md bg-blue-500/10 text-blue-400 border border-blue-500/20 hover:bg-blue-500/20 disabled:opacity-50 transition-all"
>
<Show when={syncing()} fallback={<Download size={14} />}>
<RefreshCw size={14} class="animate-spin" />
</Show>
{syncing() ? "Syncing..." : "Sync SDK Sessions"}
</button>
</div>
<For each={props.sessions}>
{(session) => (
<div
@@ -473,8 +501,7 @@ export const Sidebar: Component<SidebarProps> = (props) => {
<button
type="button"
onClick={() => toggleSkillSelection(skill.id)}
class={`w-full text-left px-3 py-2 rounded-md border transition-colors ${
isSelected()
class={`w-full text-left px-3 py-2 rounded-md border transition-colors ${isSelected()
? "border-blue-500/60 bg-blue-500/10 text-blue-200"
: "border-white/10 bg-white/5 text-zinc-300 hover:text-white"
}`}

View File

@@ -1,4 +1,5 @@
import { createEffect, createSignal, onMount, onCleanup } from "solid-js"
import { addDebugLog } from "./debug-overlay"
import { renderMarkdown, onLanguagesLoaded, initMarkdown, decodeHtmlEntities } from "../lib/markdown"
import type { TextPart, RenderCache } from "../types/message"
import { getLogger } from "../lib/logger"
@@ -16,6 +17,7 @@ interface MarkdownProps {
size?: "base" | "sm" | "tight"
disableHighlight?: boolean
onRendered?: () => void
instanceId: string
}
export function Markdown(props: MarkdownProps) {
@@ -27,7 +29,7 @@ export function Markdown(props: MarkdownProps) {
Promise.resolve().then(() => props.onRendered?.())
}
createEffect(async () => {
createEffect(() => {
const part = props.part
const rawText = typeof part.text === "string" ? part.text : ""
const text = decodeHtmlEntities(rawText)
@@ -39,6 +41,7 @@ export function Markdown(props: MarkdownProps) {
latestRequestedText = text
// 1. Check Synchronous Local Cache
const localCache = part.renderCache
if (localCache && localCache.text === text && localCache.theme === themeKey) {
setHtml(localCache.html)
@@ -46,6 +49,7 @@ export function Markdown(props: MarkdownProps) {
return
}
// 2. Check Global Cache
const globalCache = markdownRenderCache.get(cacheKey)
if (globalCache && globalCache.text === text) {
setHtml(globalCache.html)
@@ -54,11 +58,13 @@ export function Markdown(props: MarkdownProps) {
return
}
if (!highlightEnabled) {
part.renderCache = undefined
// 3. Throttle/Debounce Rendering for new content
// We delay the expensive async render to avoid choking the main thread during rapid streaming
const performRender = async () => {
if (latestRequestedText !== text) return // Stale
try {
const rendered = await renderMarkdown(text, { suppressHighlight: true })
const rendered = await renderMarkdown(text, { suppressHighlight: !highlightEnabled })
if (latestRequestedText === text) {
const cacheEntry: RenderCache = { text, html: rendered, theme: themeKey }
@@ -70,36 +76,18 @@ export function Markdown(props: MarkdownProps) {
} catch (error) {
log.error("Failed to render markdown:", error)
if (latestRequestedText === text) {
const cacheEntry: RenderCache = { text, html: text, theme: themeKey }
setHtml(text)
part.renderCache = cacheEntry
markdownRenderCache.set(cacheKey, cacheEntry)
notifyRendered()
setHtml(text) // Fallback
}
}
return
}
try {
const rendered = await renderMarkdown(text)
// Heuristic: If text length matches cache length + small amount, it's streaming.
// We can debounce. If it's a huge jump (initial load), render immediately.
// For now, always debounce slightly to unblock main thread.
// Using 200ms (was 50ms) for less frequent but smoother updates
const timerId = setTimeout(performRender, 200)
if (latestRequestedText === text) {
const cacheEntry: RenderCache = { text, html: rendered, theme: themeKey }
setHtml(rendered)
part.renderCache = cacheEntry
markdownRenderCache.set(cacheKey, cacheEntry)
notifyRendered()
}
} catch (error) {
log.error("Failed to render markdown:", error)
if (latestRequestedText === text) {
const cacheEntry: RenderCache = { text, html: text, theme: themeKey }
setHtml(text)
part.renderCache = cacheEntry
markdownRenderCache.set(cacheKey, cacheEntry)
notifyRendered()
}
}
onCleanup(() => clearTimeout(timerId))
})
onMount(() => {
@@ -121,6 +109,31 @@ export function Markdown(props: MarkdownProps) {
}, 2000)
}
}
return
}
const previewButton = target.closest(".code-block-preview") as HTMLButtonElement
if (previewButton) {
e.preventDefault()
const code = previewButton.getAttribute("data-code")
const lang = previewButton.getAttribute("data-lang")
if (code && lang === "html") {
const decodedCode = decodeURIComponent(code)
// Try to find a filename in the text part
const contentText = props.part.text || ""
const fileMatch = contentText.match(/(\w+\.html)/)
const fileName = fileMatch ? fileMatch[1] : null
window.dispatchEvent(new CustomEvent("MANUAL_PREVIEW_EVENT", {
detail: {
code: decodedCode,
fileName: fileName,
instanceId: props.instanceId
}
}))
}
}
}

View File

@@ -3,7 +3,6 @@ import { ChevronDown, ExternalLink, Plus, RefreshCw, Search, Settings } from "lu
import { Component, For, Show, createEffect, createMemo, createSignal } from "solid-js"
import { serverApi } from "../lib/api-client"
import { getLogger } from "../lib/logger"
import InstanceServiceStatus from "./instance-service-status"
import { useOptionalInstanceMetadataContext } from "../lib/contexts/instance-metadata-context"
type McpServerConfig = {
@@ -110,6 +109,9 @@ const McpManager: Component<McpManagerProps> = (props) => {
const [serverName, setServerName] = createSignal("")
const [serverJson, setServerJson] = createSignal("")
const [saving, setSaving] = createSignal(false)
const [connectionStatus, setConnectionStatus] = createSignal<Record<string, { connected: boolean }>>({})
const [toolCount, setToolCount] = createSignal(0)
const [connecting, setConnecting] = createSignal(false)
const metadataContext = useOptionalInstanceMetadataContext()
const metadata = createMemo(() => metadataContext?.metadata?.() ?? null)
@@ -138,6 +140,50 @@ const McpManager: Component<McpManagerProps> = (props) => {
} finally {
setIsLoading(false)
}
// Fetch connection status separately (non-blocking)
loadConnectionStatus().catch(() => { })
}
const loadConnectionStatus = async () => {
try {
const controller = new AbortController()
const timeoutId = setTimeout(() => controller.abort(), 5000)
const status = await serverApi.fetchWorkspaceMcpStatus(props.instanceId)
clearTimeout(timeoutId)
setConnectionStatus(status.servers ?? {})
setToolCount(status.toolCount ?? 0)
} catch (err) {
log.warn("Failed to fetch MCP status", err)
// Don't block UI on status failures
}
}
const connectAllMcps = async () => {
if (connecting()) return
setConnecting(true)
setError(null)
try {
log.info("Connecting to all MCP servers...")
const result = await serverApi.connectWorkspaceMcps(props.instanceId)
log.info("MCP connection result:", result)
setConnectionStatus(result.servers ?? {})
setToolCount(result.toolCount ?? 0)
// Check for any connection errors
const connectionDetails = (result as any).connectionDetails ?? {}
const failedServers = Object.entries(connectionDetails)
.filter(([_, details]: [string, any]) => !details.connected)
.map(([name, details]: [string, any]) => `${name}: ${details.error || 'Unknown error'}`)
if (failedServers.length > 0) {
setError(`Some servers failed to connect: ${failedServers.join(', ')}`)
}
} catch (err) {
log.error("Failed to connect MCPs", err)
setError("Failed to connect MCP servers. Check console for details.")
} finally {
setConnecting(false)
}
}
createEffect(() => {
@@ -204,6 +250,8 @@ const McpManager: Component<McpManagerProps> = (props) => {
nextConfig.mcpServers = mcpServers
setConfig(nextConfig)
await serverApi.updateWorkspaceMcpConfig(props.instanceId, nextConfig)
// Auto-connect after installing
await loadConnectionStatus()
} catch (err) {
const message = err instanceof Error ? err.message : "Failed to install MCP server."
setError(message)
@@ -325,44 +373,56 @@ const McpManager: Component<McpManagerProps> = (props) => {
{(err) => <div class="text-[11px] text-amber-400">{err()}</div>}
</Show>
<Show when={toolCount() > 0}>
<div class="text-[11px] text-green-400 mb-2">
{toolCount()} MCP tools available
</div>
</Show>
<Show
when={!isLoading() && servers().length > 0}
fallback={<div class="text-[11px] text-zinc-500 italic">{isLoading() ? "Loading MCP servers..." : "No MCP servers configured."}</div>}
>
<div class="mcp-server-list">
<For each={servers()}>
{([name, server]) => (
{([name, server]) => {
const isConnected = () => connectionStatus()[name]?.connected ?? false
return (
<div class="mcp-server-card">
<div class="mcp-server-row">
<div class="flex flex-col">
<span class="text-xs font-semibold text-zinc-100">{name}</span>
<span class="text-[11px] text-zinc-500 truncate">
{server.command ? `${server.command} ${(server.args ?? []).join(" ")}` : "Custom config"}
{server.command ? `${server.command} ${(server.args ?? []).join(" ")}` : server.url || "Custom config"}
</span>
</div>
<div class="flex items-center gap-2">
<Show when={mcpStatus()?.[name]?.status}>
<span class="mcp-status-chip">
{mcpStatus()?.[name]?.status}
<Show when={isConnected()}>
<span class="mcp-status-chip" style={{ background: "var(--status-ok, #22c55e)", color: "#fff" }}>
connected
</span>
</Show>
<Show when={mcpStatus()?.[name]?.error}>
<span class="mcp-status-error" title={String(mcpStatus()?.[name]?.error)}>
error
<Show when={!isConnected()}>
<span class="mcp-status-chip" style={{ background: "var(--status-warning, #eab308)", color: "#000" }}>
not connected
</span>
</Show>
</div>
</div>
</div>
)}
)
}}
</For>
</div>
<button
onClick={connectAllMcps}
disabled={connecting()}
class="mt-2 px-3 py-1.5 text-xs rounded-md bg-blue-500/20 border border-blue-500/40 text-blue-200 hover:text-white disabled:opacity-60 w-full"
>
{connecting() ? "Connecting..." : "Connect All MCPs"}
</button>
</Show>
<div class="mt-3">
<InstanceServiceStatus sections={["mcp"]} />
</div>
<Dialog open={showManual()} onOpenChange={setShowManual} modal>
<Dialog.Portal>
<Dialog.Overlay class="modal-overlay" />

View File

@@ -1,10 +1,7 @@
import { Index, type Accessor, createEffect } from "solid-js"
import { Index, type Accessor } from "solid-js"
import VirtualItem from "./virtual-item"
import MessageBlock from "./message-block"
import type { InstanceMessageStore } from "../stores/message-v2/instance-store"
import { getLogger } from "../lib/logger"
const log = getLogger("multix-chat")
export function getMessageAnchorId(messageId: string) {
return `message-anchor-${messageId}`
@@ -31,14 +28,6 @@ interface MessageBlockListProps {
}
export default function MessageBlockList(props: MessageBlockListProps) {
createEffect(() => {
const ids = props.messageIds();
log.info("[MessageBlockList] messageIds changed", {
count: ids.length,
ids: ids.slice(-3) // Log last 3 for context
});
});
return (
<>
<Index each={props.messageIds()}>

View File

@@ -1,4 +1,5 @@
import { For, Match, Show, Switch, createEffect, createMemo, createSignal } from "solid-js"
import { For, Match, Show, Switch, createEffect, createMemo, createSignal, untrack } from "solid-js"
import { addDebugLog } from "./debug-overlay"
import MessageItem from "./message-item"
import ToolCall from "./tool-call"
import type { InstanceMessageStore } from "../stores/message-v2/instance-store"
@@ -215,14 +216,30 @@ interface MessageBlockProps {
}
export default function MessageBlock(props: MessageBlockProps) {
const record = createMemo(() => props.store().getMessage(props.messageId))
const messageInfo = createMemo(() => props.store().getMessageInfo(props.messageId))
// CRITICAL FIX: Use untrack for store access to prevent cascading updates during streaming
// The component will still re-render when needed via the Index component in MessageBlockList
const record = createMemo(() => {
// Only create reactive dependency on message ID, not content
const id = props.messageId;
return untrack(() => props.store().getMessage(id));
})
const messageInfo = createMemo(() => {
const id = props.messageId;
return untrack(() => props.store().getMessageInfo(id));
})
const sessionCache = getSessionRenderCache(props.instanceId, props.sessionId)
// CRITICAL: Use a throttled revision check to avoid re-computing on every streaming chunk
const [lastProcessedRevision, setLastProcessedRevision] = createSignal(0);
const block = createMemo<MessageDisplayBlock | null>(() => {
const current = record()
if (!current) return null
// OPTIMIZATION: Skip cache during streaming (revision changes too fast)
// Just return a basic block structure that will be updated when streaming completes
const isStreaming = current.status === "streaming" || current.status === "sending";
const index = props.messageIndex
const lastAssistantIdx = props.lastAssistantIndex()
const isQueued = current.role === "user" && (lastAssistantIdx === -1 || index > lastAssistantIdx)
@@ -236,9 +253,11 @@ export default function MessageBlock(props: MessageBlockProps) {
: infoTime.created ?? 0
const infoError = (info as { error?: { name?: string } } | undefined)?.error
const infoErrorName = typeof infoError?.name === "string" ? infoError.name : ""
// Skip revision in cache signature during streaming
const cacheSignature = [
current.id,
current.revision,
isStreaming ? "streaming" : current.revision,
isQueued ? 1 : 0,
props.showThinking() ? 1 : 0,
props.thinkingDefaultExpanded() ? 1 : 0,
@@ -270,9 +289,11 @@ export default function MessageBlock(props: MessageBlockProps) {
current.role === "assistant" &&
!agentMetaAttached &&
pendingParts.some((part) => partHasRenderableText(part))
let cached = sessionCache.messageItems.get(segmentKey)
if (!cached) {
cached = {
// Always create a fresh object to ensure granular reactivity in <For>
// when we remove 'keyed' from <Show>. If we mutated properties
// on an existing object, <For> would assume identity match and skip updates.
const cached: ContentDisplayItem = {
type: "content",
key: segmentKey,
record: current,
@@ -281,14 +302,10 @@ export default function MessageBlock(props: MessageBlockProps) {
isQueued,
showAgentMeta: shouldShowAgentMeta,
}
// Update cache with the new version (for potential stability elsewhere, though less critical now)
sessionCache.messageItems.set(segmentKey, cached)
} else {
cached.record = current
cached.parts = pendingParts.slice()
cached.messageInfo = info
cached.isQueued = isQueued
cached.showAgentMeta = shouldShowAgentMeta
}
if (shouldShowAgentMeta) {
agentMetaAttached = true
}
@@ -396,10 +413,10 @@ export default function MessageBlock(props: MessageBlockProps) {
})
return (
<Show when={block()} keyed>
<Show when={block()}>
{(resolvedBlock) => (
<div class="message-stream-block" data-message-id={resolvedBlock.record.id}>
<For each={resolvedBlock.items}>
<div class="message-stream-block" data-message-id={resolvedBlock().record.id}>
<For each={resolvedBlock().items}>
{(item) => (
<Switch>
<Match when={item.type === "content"}>

View File

@@ -14,8 +14,8 @@ interface MessagePartProps {
instanceId: string
sessionId: string
onRendered?: () => void
}
export default function MessagePart(props: MessagePartProps) {
}
export default function MessagePart(props: MessagePartProps) {
const { isDark } = useTheme()
const { preferences } = useConfig()
@@ -105,6 +105,7 @@ interface MessagePartProps {
isDark={isDark()}
size={isAssistantMessage() ? "tight" : "base"}
onRendered={props.onRendered}
instanceId={props.instanceId}
/>
</Show>

View File

@@ -1,7 +1,7 @@
import { Combobox } from "@kobalte/core/combobox"
import { createEffect, createMemo, createSignal, onCleanup, onMount } from "solid-js"
import { createEffect, createMemo, createSignal, onCleanup, onMount, Show } from "solid-js"
import { providers, fetchProviders } from "../stores/sessions"
import { ChevronDown } from "lucide-solid"
import { ChevronDown, Database } from "lucide-solid"
import type { Model } from "../types/session"
import { getLogger } from "../lib/logger"
import { getUserScopedKey } from "../lib/user-storage"
@@ -29,6 +29,11 @@ export default function ModelSelector(props: ModelSelectorProps) {
const [isOpen, setIsOpen] = createSignal(false)
const qwenAuth = useQwenOAuth()
const [offlineModels, setOfflineModels] = createSignal<Set<string>>(new Set())
// Context-Engine status: "stopped" | "ready" | "indexing" | "error"
type ContextEngineStatus = "stopped" | "ready" | "indexing" | "error"
const [contextEngineStatus, setContextEngineStatus] = createSignal<ContextEngineStatus>("stopped")
let triggerRef!: HTMLButtonElement
let searchInputRef!: HTMLInputElement
@@ -64,9 +69,28 @@ export default function ModelSelector(props: ModelSelectorProps) {
}
window.addEventListener("opencode-zen-offline-models", handleCustom as EventListener)
window.addEventListener("storage", handleStorage)
// DISABLED: Context-Engine polling was causing performance issues
// const pollContextEngine = async () => {
// try {
// const response = await fetch("/api/context-engine/status")
// if (response.ok) {
// const data = await response.json() as { status: ContextEngineStatus }
// setContextEngineStatus(data.status ?? "stopped")
// } else {
// setContextEngineStatus("stopped")
// }
// } catch {
// setContextEngineStatus("stopped")
// }
// }
// pollContextEngine()
// const pollInterval = setInterval(pollContextEngine, 5000)
onCleanup(() => {
window.removeEventListener("opencode-zen-offline-models", handleCustom as EventListener)
window.removeEventListener("storage", handleStorage)
// clearInterval(pollInterval)
})
})
@@ -170,6 +194,29 @@ export default function ModelSelector(props: ModelSelectorProps) {
{currentModelValue() && isOfflineModel(currentModelValue() as FlatModel) && (
<span class="selector-badge selector-badge-warning">Offline</span>
)}
{/* Context-Engine RAG Status Indicator */}
<Show when={contextEngineStatus() !== "stopped"}>
<span
class="inline-flex items-center gap-1 text-[10px]"
title={
contextEngineStatus() === "ready"
? "Context Engine is active - RAG enabled"
: contextEngineStatus() === "indexing"
? "Context Engine is indexing files..."
: "Context Engine error"
}
>
<span
class={`w-2 h-2 rounded-full ${contextEngineStatus() === "ready"
? "bg-emerald-500"
: contextEngineStatus() === "indexing"
? "bg-blue-500 animate-pulse"
: "bg-red-500"
}`}
/>
<Database class="w-3 h-3 text-zinc-400" />
</span>
</Show>
</span>
{currentModelValue() && (
<span class="selector-trigger-secondary">

View File

@@ -1,5 +1,5 @@
import { Component, For, Show, createEffect, createMemo, createSignal, onCleanup } from "solid-js"
import { FolderOpen, Trash2, Check, AlertCircle, Loader2, Plus } from "lucide-solid"
import { FolderOpen, Trash2, Check, AlertCircle, Loader2, Plus, Sparkles } from "lucide-solid"
import { useConfig } from "../stores/preferences"
import { serverApi } from "../lib/api-client"
import FileSystemBrowserDialog from "./filesystem-browser-dialog"
@@ -7,12 +7,15 @@ import { openNativeFileDialog, supportsNativeDialogs } from "../lib/native/nativ
import { getLogger } from "../lib/logger"
const log = getLogger("actions")
// Special constant for Native mode (no OpenCode binary)
const NATIVE_MODE_PATH = "__nomadarch_native__"
interface BinaryOption {
path: string
version?: string
lastUsed?: number
isDefault?: boolean
isNative?: boolean
}
interface OpenCodeBinarySelectorProps {
@@ -44,10 +47,17 @@ const OpenCodeBinarySelector: Component<OpenCodeBinarySelectorProps> = (props) =
const customBinaries = createMemo(() => binaries().filter((binary) => binary.path !== "opencode"))
const binaryOptions = createMemo<BinaryOption[]>(() => [{ path: "opencode", isDefault: true }, ...customBinaries()])
// Include NomadArch Native as the first option
const binaryOptions = createMemo<BinaryOption[]>(() => [
{ path: NATIVE_MODE_PATH, isNative: true },
{ path: "opencode", isDefault: true },
...customBinaries()
])
const currentSelectionPath = () => props.selectedBinary || "opencode"
const isNativeMode = () => currentSelectionPath() === NATIVE_MODE_PATH
createEffect(() => {
if (!props.selectedBinary && lastUsedBinary()) {
props.onBinaryChange(lastUsedBinary()!)
@@ -97,6 +107,11 @@ const OpenCodeBinarySelector: Component<OpenCodeBinarySelectorProps> = (props) =
})
async function validateBinary(path: string): Promise<{ valid: boolean; version?: string; error?: string }> {
// Native mode is always valid
if (path === NATIVE_MODE_PATH) {
return { valid: true, version: "Native" }
}
if (versionInfo().has(path)) {
const cachedVersion = versionInfo().get(path)
return cachedVersion ? { valid: true, version: cachedVersion } : { valid: true }
@@ -209,6 +224,7 @@ const OpenCodeBinarySelector: Component<OpenCodeBinarySelectorProps> = (props) =
}
function getDisplayName(path: string): string {
if (path === NATIVE_MODE_PATH) return "🚀 NomadArch Native"
if (path === "opencode") return "opencode (system PATH)"
const parts = path.split(/[/\\]/)
return parts[parts.length - 1] ?? path
@@ -277,18 +293,95 @@ const OpenCodeBinarySelector: Component<OpenCodeBinarySelectorProps> = (props) =
</div>
</div>
</Show>
{/* Mode Comparison Info */}
<div class="rounded-lg border border-white/10 overflow-hidden">
<details class="group">
<summary class="flex items-center justify-between px-3 py-2 cursor-pointer bg-white/5 hover:bg-white/10 transition-colors">
<span class="text-xs font-medium text-muted">📊 Compare: Native vs SDK Mode</span>
<svg class="w-4 h-4 text-muted transition-transform group-open:rotate-180" fill="none" viewBox="0 0 24 24" stroke="currentColor">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M19 9l-7 7-7-7" />
</svg>
</summary>
<div class="p-3 space-y-3 text-xs bg-black/20">
{/* Native Mode */}
<div class="space-y-1.5">
<div class="flex items-center gap-2 text-emerald-400 font-medium">
<Sparkles class="w-3.5 h-3.5" />
<span>NomadArch Native (Recommended)</span>
</div>
<div class="pl-5 space-y-1 text-muted">
<div class="flex items-center gap-2">
<span class="text-emerald-400"></span>
<span>No external binary required</span>
</div>
<div class="flex items-center gap-2">
<span class="text-emerald-400"></span>
<span>Free Zen models (GPT-5 Nano, Grok Code, GLM-4.7)</span>
</div>
<div class="flex items-center gap-2">
<span class="text-emerald-400"></span>
<span>Faster startup, simpler setup</span>
</div>
<div class="flex items-center gap-2">
<span class="text-emerald-400"></span>
<span>Full MCP tool support</span>
</div>
<div class="flex items-center gap-2">
<span class="text-amber-400"></span>
<span>No LSP integration (coming soon)</span>
</div>
</div>
</div>
{/* SDK Mode */}
<div class="space-y-1.5 pt-2 border-t border-white/10">
<div class="flex items-center gap-2 text-blue-400 font-medium">
<Check class="w-3.5 h-3.5" />
<span>OpenCode SDK Mode</span>
</div>
<div class="pl-5 space-y-1 text-muted">
<div class="flex items-center gap-2">
<span class="text-blue-400"></span>
<span>Full LSP integration</span>
</div>
<div class="flex items-center gap-2">
<span class="text-blue-400"></span>
<span>All OpenCode features</span>
</div>
<div class="flex items-center gap-2">
<span class="text-blue-400"></span>
<span>More provider options</span>
</div>
<div class="flex items-center gap-2">
<span class="text-amber-400"></span>
<span>Requires binary download</span>
</div>
<div class="flex items-center gap-2">
<span class="text-amber-400"></span>
<span>Platform-specific binaries</span>
</div>
</div>
</div>
</div>
</details>
</div>
</div>
<div class="panel-list panel-list--fill max-h-80 overflow-y-auto">
<For each={binaryOptions()}>
{(binary) => {
const isDefault = binary.isDefault
const isNative = binary.isNative
const versionLabel = () => versionInfo().get(binary.path) ?? binary.version
return (
<div
class="panel-list-item flex items-center"
classList={{ "panel-list-item-highlight": currentSelectionPath() === binary.path }}
classList={{
"panel-list-item-highlight": currentSelectionPath() === binary.path,
"bg-gradient-to-r from-emerald-500/10 to-cyan-500/10 border-l-2 border-emerald-500": isNative && currentSelectionPath() === binary.path,
}}
>
<button
type="button"
@@ -298,31 +391,48 @@ const OpenCodeBinarySelector: Component<OpenCodeBinarySelectorProps> = (props) =
>
<div class="flex flex-col flex-1 min-w-0 gap-1.5">
<div class="flex items-center gap-2">
<Show when={isNative}>
<Sparkles
class={`w-4 h-4 transition-opacity ${currentSelectionPath() === binary.path ? "text-emerald-400" : "text-muted"}`}
/>
</Show>
<Show when={!isNative}>
<Check
class={`w-4 h-4 transition-opacity ${currentSelectionPath() === binary.path ? "opacity-100" : "opacity-0"}`}
/>
<span class="text-sm font-medium truncate text-primary">{getDisplayName(binary.path)}</span>
</Show>
<span class={`text-sm font-medium truncate ${isNative ? "text-emerald-400" : "text-primary"}`}>
{getDisplayName(binary.path)}
</span>
<Show when={isNative}>
<span class="text-[10px] px-1.5 py-0.5 rounded bg-emerald-500/20 text-emerald-400 font-medium">
RECOMMENDED
</span>
</Show>
</div>
<Show when={!isDefault}>
<Show when={!isDefault && !isNative}>
<div class="text-xs font-mono truncate pl-6 text-muted">{binary.path}</div>
</Show>
<div class="flex items-center gap-2 text-xs text-muted pl-6 flex-wrap">
<Show when={versionLabel()}>
<Show when={versionLabel() && !isNative}>
<span class="selector-badge-version">v{versionLabel()}</span>
</Show>
<Show when={isPathValidating(binary.path)}>
<span class="selector-badge-time">Checking</span>
</Show>
<Show when={!isDefault && binary.lastUsed}>
<Show when={!isDefault && !isNative && binary.lastUsed}>
<span class="selector-badge-time">{formatRelativeTime(binary.lastUsed)}</span>
</Show>
<Show when={isDefault}>
<span class="selector-badge-time">Use binary from system PATH</span>
</Show>
<Show when={isNative}>
<span class="text-emerald-400/70">No OpenCode binary needed Free Zen models included</span>
</Show>
</div>
</div>
</button>
<Show when={!isDefault}>
<Show when={!isDefault && !isNative}>
<button
type="button"
class="p-2 text-muted hover:text-primary"
@@ -352,5 +462,7 @@ const OpenCodeBinarySelector: Component<OpenCodeBinarySelectorProps> = (props) =
)
}
export default OpenCodeBinarySelector
// Export the native mode constant for use elsewhere
export const NOMADARCH_NATIVE_MODE = NATIVE_MODE_PATH
export default OpenCodeBinarySelector

View File

@@ -125,8 +125,8 @@ export function RemoteAccessOverlay(props: RemoteAccessOverlayProps) {
<header class="remote-header">
<div>
<p class="remote-eyebrow">Remote handover</p>
<h2 class="remote-title">Connect to CodeNomad remotely</h2>
<p class="remote-subtitle">Use the addresses below to open CodeNomad from another device.</p>
<h2 class="remote-title">Connect to NomadArch remotely</h2>
<p class="remote-subtitle">Use the addresses below to open NomadArch from another device.</p>
</div>
<button type="button" class="remote-close" onClick={props.onClose} aria-label="Close remote access">
×

View File

@@ -0,0 +1,672 @@
import { Component, createSignal, onMount, onCleanup, For, Show } from 'solid-js'
import { Rocket, CheckCircle, XCircle, Loader, Sparkles, LogIn, LogOut, Shield, ExternalLink, Copy } from 'lucide-solid'
import { getUserScopedKey } from '../../lib/user-storage'
import { instances } from '../../stores/instances'
import { fetchProviders } from '../../stores/session-api'
// Shape of one model entry returned by /api/antigravity/models.
interface AntigravityModel {
  id: string
  name: string
  family?: string // fallback badge label when the id prefix is unrecognized
  reasoning?: boolean // model exposes step-by-step "thinking" output
  tool_call?: boolean // model supports tool/function calling
  limit?: {
    context: number // context window limit (presumably tokens — confirm with API docs)
    output: number // max output limit (presumably tokens — confirm with API docs)
  }
}
// OAuth token persisted in user-scoped localStorage.
interface AntigravityToken {
  access_token: string
  refresh_token?: string
  expires_in: number // lifetime in seconds (see isTokenValid's expiry math)
  created_at: number // issue timestamp; this component stores Date.now() (ms)
}
// localStorage keys — always accessed through getUserScopedKey(...).
const ANTIGRAVITY_TOKEN_KEY = "antigravity_oauth_token"
const ANTIGRAVITY_PROJECT_KEY = "antigravity_project_id"
// Settings panel for the Antigravity provider: handles Google device-flow
// sign-in, connection probing, and lists the available model catalog.
const AntigravitySettings: Component = () => {
  const [models, setModels] = createSignal<AntigravityModel[]>([])
  const [isLoading, setIsLoading] = createSignal(true)
  const [connectionStatus, setConnectionStatus] = createSignal<'idle' | 'testing' | 'connected' | 'failed'>('idle')
  const [connectionIssue, setConnectionIssue] = createSignal<{ title: string; message: string; link?: string } | null>(null)
  const [authStatus, setAuthStatus] = createSignal<'unknown' | 'authenticated' | 'unauthenticated'>('unknown')
  const [error, setError] = createSignal<string | null>(null)
  const [projectId, setProjectId] = createSignal("")
  // Device auth state
  const [isAuthenticating, setIsAuthenticating] = createSignal(false)
  const [deviceAuthSession, setDeviceAuthSession] = createSignal<{
    sessionId: string
    userCode?: string
    verificationUrl: string
  } | null>(null)
  const [copied, setCopied] = createSignal(false)
  // Handle of the active device-auth polling timer (window.setInterval).
  let pollInterval: number | undefined
  // Check stored token on mount: restore the optional project override,
  // derive the auth badge, then load models and probe the backend.
  onMount(async () => {
    const storedProjectId = window.localStorage.getItem(getUserScopedKey(ANTIGRAVITY_PROJECT_KEY))
    if (storedProjectId) {
      setProjectId(storedProjectId)
    }
    checkAuthStatus()
    await loadModels()
    await testConnection()
  })
  // Stop any in-flight polling when the component unmounts.
  onCleanup(() => {
    if (pollInterval) {
      clearInterval(pollInterval)
    }
  })
const getStoredToken = (): AntigravityToken | null => {
if (typeof window === "undefined") return null
try {
const raw = window.localStorage.getItem(getUserScopedKey(ANTIGRAVITY_TOKEN_KEY))
if (!raw) return null
return JSON.parse(raw)
} catch {
return null
}
}
const isTokenValid = (token: AntigravityToken | null): boolean => {
if (!token) return false
const createdAt = token.created_at > 1e12 ? Math.floor(token.created_at / 1000) : token.created_at
const expiresAt = (createdAt + token.expires_in) * 1000 - 300000 // 5 min buffer
return Date.now() < expiresAt
}
const parseSubscriptionIssue = (raw: string | null | undefined) => {
if (!raw) return null
try {
const parsed = JSON.parse(raw)
const errorPayload = parsed?.error
const message = typeof errorPayload?.message === "string" ? errorPayload.message : raw
const details = Array.isArray(errorPayload?.details) ? errorPayload.details : []
const reason = details.find((entry: any) => entry?.reason)?.reason
const helpLink = details
.flatMap((entry: any) => Array.isArray(entry?.links) ? entry.links : [])
.find((link: any) => typeof link?.url === "string")?.url
if (reason === "SUBSCRIPTION_REQUIRED" || /Gemini Code Assist license/i.test(message)) {
return {
title: "Subscription required",
message,
link: helpLink
}
}
} catch {
if (/SUBSCRIPTION_REQUIRED/i.test(raw) || /Gemini Code Assist license/i.test(raw)) {
return {
title: "Subscription required",
message: raw
}
}
}
return null
}
const getAuthHeaders = () => {
const token = getStoredToken()
const headers: Record<string, string> = {}
if (token?.access_token && isTokenValid(token)) {
headers.Authorization = `Bearer ${token.access_token}`
}
if (projectId()) {
headers["X-Antigravity-Project"] = projectId()
}
return Object.keys(headers).length > 0 ? headers : undefined
}
const checkAuthStatus = () => {
const token = getStoredToken()
if (isTokenValid(token)) {
setAuthStatus('authenticated')
} else {
setAuthStatus('unauthenticated')
}
}
const loadModels = async () => {
setIsLoading(true)
try {
const response = await fetch('/api/antigravity/models', {
headers: getAuthHeaders()
})
if (response.ok) {
const data = await response.json()
setModels(data.models || [])
setError(null)
} else {
throw new Error('Failed to load models')
}
} catch (err) {
console.error('Failed to load Antigravity models:', err)
setError('Failed to load models')
} finally {
setIsLoading(false)
}
}
const testConnection = async () => {
setConnectionStatus('testing')
setConnectionIssue(null)
try {
const response = await fetch('/api/antigravity/test', {
headers: getAuthHeaders()
})
if (response.ok) {
const data = await response.json()
setConnectionStatus(data.connected ? 'connected' : 'failed')
const issue = parseSubscriptionIssue(data.error)
if (issue) {
setConnectionIssue(issue)
}
} else {
const errorText = await response.text().catch(() => "")
const issue = parseSubscriptionIssue(errorText)
if (issue) {
setConnectionIssue(issue)
}
setConnectionStatus('failed')
}
} catch (err) {
setConnectionStatus('failed')
}
}
const offlineLabel = () => connectionIssue()?.title ?? "Offline"
  // Start device authorization flow: ask the backend for a device-auth
  // session, surface the user code, begin polling for the token, and open
  // Google's verification page in a new tab.
  // NOTE(review): on success isAuthenticating stays true until stopPolling()
  // runs — presumably intentional so the UI keeps its in-progress state.
  const startDeviceAuth = async () => {
    setIsAuthenticating(true)
    setError(null)
    try {
      const response = await fetch('/api/antigravity/device-auth/start', {
        method: 'POST'
      })
      if (!response.ok) {
        // Build the most specific error message the backend provided.
        const errorData = await response.json().catch(() => ({}))
        const base = errorData.error || 'Failed to start authentication'
        const details = errorData.details ? ` - ${errorData.details}` : ''
        throw new Error(`${base}${details}`)
      }
      const data = await response.json() as {
        sessionId: string
        userCode?: string
        verificationUrl: string
        expiresIn: number
        interval: number
      }
      setDeviceAuthSession({
        sessionId: data.sessionId,
        userCode: data.userCode || "",
        verificationUrl: data.verificationUrl
      })
      // Start polling for token at the server-suggested interval (5s fallback).
      const pollIntervalMs = (data.interval || 5) * 1000
      pollInterval = window.setInterval(() => {
        pollForToken(data.sessionId)
      }, pollIntervalMs)
      // Open verification URL in new tab
      window.open(data.verificationUrl, '_blank')
    } catch (err: any) {
      console.error('Device auth error:', err)
      // Try to get detailed error message
      let errorMessage = err.message || 'Authentication failed'
      if (err.details) {
        errorMessage += ` - ${err.details}`
      }
      setError(errorMessage)
      setIsAuthenticating(false)
    }
  }
  // Poll for token completion. Invoked on an interval until the backend
  // reports a terminal status ('success' | 'denied' | 'expired' | 'error');
  // network errors are logged but do not stop the loop (next tick retries).
  const pollForToken = async (sessionId: string) => {
    try {
      const response = await fetch('/api/antigravity/device-auth/poll', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({ sessionId })
      })
      if (!response.ok) {
        const errorData = await response.json().catch(() => ({}))
        if (response.status === 410 || response.status === 404) {
          // Session expired
          stopPolling()
          setError('Session expired. Please try again.')
          return
        }
        throw new Error(errorData.error || 'Poll failed')
      }
      const data = await response.json() as any
      if (data.status === 'pending') {
        // Still waiting, continue polling
        return
      }
      if (data.status === 'success') {
        // Got tokens! Save them. created_at is recorded in milliseconds,
        // which isTokenValid's > 1e12 check normalizes back to seconds.
        const token: AntigravityToken = {
          access_token: data.accessToken,
          refresh_token: data.refreshToken,
          expires_in: data.expiresIn,
          created_at: Date.now()
        }
        window.localStorage.setItem(
          getUserScopedKey(ANTIGRAVITY_TOKEN_KEY),
          JSON.stringify(token)
        )
        stopPolling()
        setAuthStatus('authenticated')
        setError(null)
        loadModels() // fire-and-forget; only testConnection below is awaited
        await testConnection()
        // Refresh providers sequentially for every running instance so the
        // new Antigravity models show up in the model pickers.
        for (const instance of instances().values()) {
          try {
            await fetchProviders(instance.id)
          } catch (refreshError) {
            console.error(`Failed to refresh providers for instance ${instance.id}:`, refreshError)
          }
        }
        return
      }
      if (data.status === 'denied') {
        stopPolling()
        setError('Access was denied. Please try again.')
        return
      }
      if (data.status === 'expired') {
        stopPolling()
        setError('Session expired. Please try again.')
        return
      }
      if (data.status === 'error') {
        stopPolling()
        setError(data.error || 'Authentication failed')
        return
      }
    } catch (err: any) {
      console.error('Poll error:', err)
      // Don't stop polling on network errors, just log them
    }
  }
const stopPolling = () => {
if (pollInterval) {
clearInterval(pollInterval)
pollInterval = undefined
}
setIsAuthenticating(false)
setDeviceAuthSession(null)
}
const cancelAuth = () => {
stopPolling()
setError(null)
}
const signOut = () => {
window.localStorage.removeItem(getUserScopedKey(ANTIGRAVITY_TOKEN_KEY))
setAuthStatus('unauthenticated')
setConnectionIssue(null)
setConnectionStatus('idle')
for (const instance of instances().values()) {
fetchProviders(instance.id).catch((refreshError) => {
console.error(`Failed to refresh providers for instance ${instance.id}:`, refreshError)
})
}
}
const copyCode = async () => {
const session = deviceAuthSession()
if (session?.userCode) {
await navigator.clipboard.writeText(session.userCode)
setCopied(true)
setTimeout(() => setCopied(false), 2000)
}
}
const formatNumber = (num: number): string => {
if (num >= 1000000) return `${(num / 1000000).toFixed(1)}M`
if (num >= 1000) return `${(num / 1000).toFixed(0)}K`
return num.toString()
}
const getModelFamily = (model: AntigravityModel): { label: string; color: string } => {
if (model.id.startsWith('gemini')) return { label: 'Gemini', color: 'bg-blue-500/20 text-blue-400' }
if (model.id.startsWith('claude')) return { label: 'Claude', color: 'bg-orange-500/20 text-orange-400' }
if (model.id.startsWith('gpt')) return { label: 'GPT', color: 'bg-green-500/20 text-green-400' }
return { label: model.family || 'Other', color: 'bg-zinc-700 text-zinc-400' }
}
return (
<div class="space-y-6 p-6">
{/* Header */}
<div class="flex items-center justify-between">
<div class="flex items-center gap-3">
<div class="p-2 bg-gradient-to-br from-purple-500/20 to-blue-500/20 rounded-lg">
<Rocket class="w-6 h-6 text-purple-400" />
</div>
<div>
<h2 class="text-xl font-semibold text-white">Antigravity</h2>
<p class="text-sm text-zinc-400">Premium models via Google authentication</p>
</div>
</div>
<div class="flex items-center gap-2">
{connectionStatus() === 'testing' && (
<span class="flex items-center gap-2 text-sm text-zinc-400">
<Loader class="w-4 h-4 animate-spin" />
Testing...
</span>
)}
{connectionStatus() === 'connected' && (
<span class="flex items-center gap-2 text-sm text-emerald-400">
<CheckCircle class="w-4 h-4" />
Connected
</span>
)}
{connectionStatus() === 'failed' && (
<span class="flex items-center gap-2 text-sm text-red-400">
<XCircle class="w-4 h-4" />
{offlineLabel()}
</span>
)}
</div>
</div>
{/* Info Banner */}
<div class="bg-gradient-to-r from-purple-500/10 via-blue-500/10 to-purple-500/10 border border-purple-500/20 rounded-xl p-4">
<div class="flex items-start gap-3">
<Sparkles class="w-5 h-5 text-purple-400 mt-0.5" />
<div>
<h3 class="font-semibold text-purple-300 mb-1">Premium AI Models</h3>
<p class="text-sm text-zinc-300">
Antigravity provides access to Gemini 3 Pro/Flash, Claude Sonnet 4.5, Claude Opus 4.5,
and GPT-OSS 120B through Google's infrastructure. Sign in with your Google account to get started.
</p>
</div>
</div>
</div>
{/* Authentication Section */}
<div class="bg-zinc-900/50 border border-zinc-800 rounded-xl p-4 space-y-4">
<div class="flex items-center justify-between">
<div class="flex items-center gap-3">
<Shield class="w-5 h-5 text-zinc-400" />
<div>
<h4 class="font-medium text-white">Google Authentication</h4>
<p class="text-xs text-zinc-500">
{authStatus() === 'authenticated'
? 'You are signed in and can use Antigravity models'
: 'Sign in with Google to access premium models'}
</p>
</div>
</div>
<Show when={authStatus() === 'authenticated'}>
<div class="flex items-center gap-3">
<span class="flex items-center gap-2 px-3 py-1.5 bg-emerald-500/20 text-emerald-400 rounded-lg text-sm">
<CheckCircle class="w-4 h-4" />
Authenticated
</span>
<button
onClick={signOut}
class="flex items-center gap-2 px-3 py-1.5 text-sm text-zinc-400 hover:text-white bg-zinc-800 hover:bg-zinc-700 rounded-lg transition-colors"
>
<LogOut class="w-4 h-4" />
Sign out
</button>
</div>
</Show>
</div>
{/* Not authenticated - show login button or device auth flow */}
<Show when={authStatus() === 'unauthenticated'}>
<Show when={!deviceAuthSession()}>
<button
onClick={startDeviceAuth}
disabled={isAuthenticating()}
class="w-full flex items-center justify-center gap-2 px-4 py-3 bg-purple-600 hover:bg-purple-500 disabled:bg-purple-600/50 text-white rounded-lg transition-colors"
>
{isAuthenticating() ? (
<>
<Loader class="w-5 h-5 animate-spin" />
Starting authentication...
</>
) : (
<>
<LogIn class="w-5 h-5" />
Sign in with Google
</>
)}
</button>
</Show>
{/* Device auth in progress - show code */}
<Show when={deviceAuthSession()}>
<div class="bg-purple-500/10 border border-purple-500/30 rounded-lg p-4 space-y-4">
<div class="text-center">
<Show
when={Boolean(deviceAuthSession()?.userCode)}
fallback={
<p class="text-sm text-zinc-300">
Complete the sign-in in the browser window.
</p>
}
>
<p class="text-sm text-zinc-300 mb-3">
Enter this code on the Google sign-in page:
</p>
<div class="flex items-center justify-center gap-3">
<code class="px-6 py-3 bg-zinc-900 rounded-lg text-2xl font-mono font-bold text-white tracking-widest">
{deviceAuthSession()?.userCode}
</code>
<button
onClick={copyCode}
class="p-2 text-zinc-400 hover:text-white bg-zinc-800 hover:bg-zinc-700 rounded-lg transition-colors"
title="Copy code"
>
{copied() ? <CheckCircle class="w-5 h-5 text-emerald-400" /> : <Copy class="w-5 h-5" />}
</button>
</div>
</Show>
</div>
<div class="flex items-center justify-center gap-2 text-sm text-purple-300">
<Loader class="w-4 h-4 animate-spin" />
Waiting for you to complete sign-in...
</div>
<div class="flex items-center justify-center gap-4">
<a
href={deviceAuthSession()?.verificationUrl}
target="_blank"
rel="noopener noreferrer"
class="flex items-center gap-2 px-4 py-2 bg-purple-600 hover:bg-purple-500 text-white rounded-lg text-sm transition-colors"
>
<ExternalLink class="w-4 h-4" />
Open Google Sign-in
</a>
<button
onClick={cancelAuth}
class="px-4 py-2 text-zinc-400 hover:text-white bg-zinc-800 hover:bg-zinc-700 rounded-lg text-sm transition-colors"
>
Cancel
</button>
</div>
</div>
</Show>
</Show>
<div class="mt-4 flex flex-col gap-2 text-sm text-zinc-400">
<label class="text-xs uppercase tracking-wide text-zinc-500">Project ID (optional)</label>
<input
type="text"
value={projectId()}
onInput={(event) => {
const value = event.currentTarget.value.trim()
setProjectId(value)
if (typeof window !== "undefined") {
const key = getUserScopedKey(ANTIGRAVITY_PROJECT_KEY)
if (value) {
window.localStorage.setItem(key, value)
} else {
window.localStorage.removeItem(key)
}
}
}}
class="w-full bg-zinc-900/70 border border-zinc-800 rounded-lg px-3 py-2 text-sm text-white placeholder:text-zinc-600 focus:outline-none focus:ring-2 focus:ring-purple-500/50"
placeholder="e.g. my-gcp-project-id"
/>
<p class="text-xs text-zinc-500">
Set this only if your account is tied to a specific Code Assist project.
</p>
<button
type="button"
onClick={() => testConnection()}
class="w-fit px-3 py-1.5 text-xs bg-zinc-800 hover:bg-zinc-700 rounded-lg text-zinc-200"
>
Re-check connection
</button>
</div>
</div>
{/* Error Display */}
<Show when={error()}>
<div class="p-4 bg-red-500/10 border border-red-500/30 rounded-lg text-red-400 text-sm">
{error()}
</div>
</Show>
<Show when={connectionIssue()}>
<div class="p-4 bg-amber-500/10 border border-amber-500/30 rounded-lg text-amber-200 text-sm space-y-2">
<div class="font-semibold">{connectionIssue()?.title}</div>
<div>{connectionIssue()?.message}</div>
<Show when={connectionIssue()?.link}>
<a
href={connectionIssue()?.link}
target="_blank"
rel="noopener noreferrer"
class="inline-flex items-center gap-2 text-amber-300 hover:text-amber-200 underline"
>
Learn more
</a>
</Show>
</div>
</Show>
{/* Models Grid */}
<div class="space-y-4">
<div class="flex items-center justify-between">
<h3 class="text-lg font-medium text-white">Available Models</h3>
<button
onClick={loadModels}
disabled={isLoading()}
class="flex items-center gap-2 px-3 py-1.5 text-sm bg-zinc-800 hover:bg-zinc-700 rounded-lg transition-colors"
>
{isLoading() ? <Loader class="w-4 h-4 animate-spin" /> : null}
Refresh
</button>
</div>
<Show when={isLoading()}>
<div class="flex items-center justify-center py-12">
<div class="flex items-center gap-3 text-zinc-400">
<Loader class="w-6 h-6 animate-spin" />
<span>Loading models...</span>
</div>
</div>
</Show>
<Show when={!isLoading() && models().length > 0}>
<div class="grid grid-cols-1 md:grid-cols-2 gap-4">
<For each={models()}>
{(model) => {
const family = getModelFamily(model)
return (
<div class="group bg-zinc-900/50 border border-zinc-800 hover:border-purple-500/50 rounded-xl p-4 transition-all">
<div class="flex items-start justify-between mb-3">
<div>
<h4 class="font-semibold text-white group-hover:text-purple-300 transition-colors">
{model.name}
</h4>
<p class="text-xs text-zinc-500 font-mono">{model.id}</p>
</div>
<span class={`px-2 py-0.5 text-[10px] font-bold uppercase rounded ${family.color}`}>
{family.label}
</span>
</div>
<div class="flex flex-wrap gap-2 mb-3">
{model.reasoning && (
<span class="px-2 py-0.5 text-[10px] bg-purple-500/20 text-purple-400 rounded">
Thinking
</span>
)}
{model.tool_call && (
<span class="px-2 py-0.5 text-[10px] bg-blue-500/20 text-blue-400 rounded">
Tool Use
</span>
)}
</div>
{model.limit && (
<div class="flex items-center gap-4 text-xs text-zinc-500">
<span>Context: {formatNumber(model.limit.context)}</span>
<span>Output: {formatNumber(model.limit.output)}</span>
</div>
)}
</div>
)
}}
</For>
</div>
</Show>
<Show when={!isLoading() && models().length === 0}>
<div class="text-center py-12 text-zinc-500">
<p>Models will be available after signing in.</p>
</div>
</Show>
</div>
{/* Usage Info */}
<div class="bg-zinc-900/50 border border-zinc-800 rounded-xl p-4">
<h4 class="font-medium text-white mb-2">How to Use</h4>
<ul class="text-sm text-zinc-400 space-y-1">
<li> Click "Sign in with Google" and enter the code on the Google page</li>
<li> Once authenticated, select any Antigravity model from the chat model picker</li>
<li> Models include Gemini 3, Claude Sonnet/Opus 4.5, and GPT-OSS</li>
<li> Thinking-enabled models show step-by-step reasoning</li>
<li> Full tool use and MCP support included</li>
</ul>
</div>
</div>
)
}
export default AntigravitySettings

View File

@@ -0,0 +1,319 @@
import { Component, createSignal, onMount, For, Show, createEffect, on } from "solid-js"
import { CheckCircle, XCircle, Loader, RefreshCw, Settings, AlertTriangle } from "lucide-solid"
import { userFetch } from "../../lib/user-context"
// Live status row for one API integration, as rendered by ApiStatusChecker.
interface ApiStatus {
  id: string
  name: string
  icon: string // emoji shown next to the name
  enabled: boolean // the user has this integration configured
  connected: boolean // the last connectivity probe succeeded
  checking: boolean // a probe is currently in flight
  error?: string
  lastChecked?: number // epoch ms of the last completed check
}
// Static descriptor for one API: how to tell whether it is configured and
// how to probe its connectivity.
interface ApiStatusCheck {
  id: string
  name: string
  icon: string
  checkEnabled: () => Promise<boolean>
  testConnection: () => Promise<boolean>
}
// Registry of API integrations the status panel knows how to probe.
// The first three hit backend test endpoints; the OAuth-based entries only
// inspect the locally stored token and never touch the network.
const API_CHECKS: ApiStatusCheck[] = [
  {
    id: "opencode-zen",
    name: "OpenCode Zen",
    icon: "🆓",
    checkEnabled: async () => true, // Always available
    testConnection: async () => {
      try {
        const res = await userFetch("/api/opencode-zen/test")
        if (!res.ok) return false
        const data = await res.json()
        return data.connected === true
      } catch {
        return false
      }
    },
  },
  {
    id: "ollama-cloud",
    name: "Ollama Cloud",
    icon: "🦙",
    checkEnabled: async () => {
      try {
        const res = await userFetch("/api/ollama/config")
        if (!res.ok) return false
        const data = await res.json()
        return data.config?.enabled === true
      } catch {
        return false
      }
    },
    testConnection: async () => {
      try {
        const res = await userFetch("/api/ollama/test", { method: "POST" })
        if (!res.ok) return false
        const data = await res.json()
        return data.connected === true
      } catch {
        return false
      }
    },
  },
  {
    id: "zai",
    name: "Z.AI Plan",
    icon: "🧠",
    checkEnabled: async () => {
      try {
        const res = await userFetch("/api/zai/config")
        if (!res.ok) return false
        const data = await res.json()
        return data.config?.enabled === true
      } catch {
        return false
      }
    },
    testConnection: async () => {
      try {
        const res = await userFetch("/api/zai/test", { method: "POST" })
        if (!res.ok) return false
        const data = await res.json()
        return data.connected === true
      } catch {
        return false
      }
    },
  },
  {
    // "Connected" means a non-expired OAuth token is stored locally.
    id: "qwen-oauth",
    name: "Qwen Code",
    icon: "🔷",
    checkEnabled: async () => {
      const token = localStorage.getItem("qwen_oauth_token")
      return token !== null && token.length > 0
    },
    testConnection: async () => {
      try {
        const tokenStr = localStorage.getItem("qwen_oauth_token")
        if (!tokenStr) return false
        const token = JSON.parse(tokenStr)
        // Check if token is expired
        // NOTE(review): assumes created_at is ms and expires_in is seconds —
        // confirm the qwen token shape matches (AntigravitySettings also
        // tolerates created_at in seconds via a > 1e12 check).
        const expiresAt = (token.created_at || 0) + (token.expires_in || 0) * 1000
        return Date.now() < expiresAt
      } catch {
        return false
      }
    },
  },
  {
    // Same local-expiry check for the Antigravity token.
    // NOTE(review): AntigravitySettings stores this token under
    // getUserScopedKey("antigravity_oauth_token"); reading the unscoped key
    // here may miss it — confirm getUserScopedKey for the default user.
    id: "antigravity",
    name: "Antigravity",
    icon: "🚀",
    checkEnabled: async () => {
      const token = localStorage.getItem("antigravity_oauth_token")
      return token !== null && token.length > 0
    },
    testConnection: async () => {
      try {
        const tokenStr = localStorage.getItem("antigravity_oauth_token")
        if (!tokenStr) return false
        const token = JSON.parse(tokenStr)
        const expiresAt = (token.created_at || 0) + (token.expires_in || 0) * 1000
        return Date.now() < expiresAt
      } catch {
        return false
      }
    },
  },
]
// Props for ApiStatusChecker; compact renders a one-line emoji strip.
interface ApiStatusCheckerProps {
  onSettingsClick?: (apiId: string) => void // invoked with the API id to open its settings
  compact?: boolean
}
// Dashboard widget that probes each configured API integration and shows a
// connected/failed badge per API (full list, or compact emoji strip).
const ApiStatusChecker: Component<ApiStatusCheckerProps> = (props) => {
  const [statuses, setStatuses] = createSignal<ApiStatus[]>([])
  const [isChecking, setIsChecking] = createSignal(false)
  const [lastFullCheck, setLastFullCheck] = createSignal<number>(0)
  // Run every registered check sequentially. While an API is being probed its
  // row is flagged checking: true; the complete result set then replaces the
  // signal in one shot so a mid-run failure cannot leave stale rows behind.
  const checkAllApis = async () => {
    setIsChecking(true)
    const results: ApiStatus[] = []
    for (const api of API_CHECKS) {
      // Mark (or create) this API's row as in-flight.
      setStatuses((prev) => {
        const existing = prev.find((s) => s.id === api.id)
        if (existing) {
          return prev.map((s) => (s.id === api.id ? { ...s, checking: true } : s))
        }
        return [...prev, { id: api.id, name: api.name, icon: api.icon, enabled: false, connected: false, checking: true }]
      })
      try {
        const enabled = await api.checkEnabled()
        let connected = false
        let error: string | undefined
        if (enabled) {
          // Only probe connectivity for APIs the user has configured.
          try {
            connected = await api.testConnection()
          } catch (e) {
            error = e instanceof Error ? e.message : "Connection test failed"
          }
        }
        results.push({
          id: api.id,
          name: api.name,
          icon: api.icon,
          enabled,
          connected,
          checking: false,
          error,
          lastChecked: Date.now(),
        })
      } catch (e) {
        // checkEnabled itself failed: report the API as unconfigured + errored.
        results.push({
          id: api.id,
          name: api.name,
          icon: api.icon,
          enabled: false,
          connected: false,
          checking: false,
          error: e instanceof Error ? e.message : "Check failed",
          lastChecked: Date.now(),
        })
      }
    }
    setStatuses(results)
    setLastFullCheck(Date.now())
    setIsChecking(false)
  }
  // Kick off the first full check as soon as the widget mounts.
  onMount(() => {
    checkAllApis()
  })
  // Icon for a status row; precedence: in-flight > unconfigured > connected
  // > errored > generic failure. Mirrors getStatusText's ordering.
  const getStatusIcon = (status: ApiStatus) => {
    if (status.checking) {
      return <Loader class="w-4 h-4 animate-spin text-gray-400" />
    }
    if (!status.enabled) {
      // Neutral dot: the integration simply is not configured.
      return <div class="w-4 h-4 rounded-full bg-gray-300 dark:bg-gray-600" />
    }
    if (status.connected) {
      return <CheckCircle class="w-4 h-4 text-green-500" />
    }
    if (status.error) {
      return <XCircle class="w-4 h-4 text-red-500" />
    }
    // Enabled but not connected and no explicit error: warn.
    return <AlertTriangle class="w-4 h-4 text-yellow-500" />
  }
const getStatusText = (status: ApiStatus) => {
if (status.checking) return "Checking..."
if (!status.enabled) return "Not configured"
if (status.connected) return "Connected"
if (status.error) return status.error
return "Connection failed"
}
  // Derived counts for the "X of Y APIs connected" summary line.
  // NOTE(review): enabledCount actually counts enabled AND connected rows —
  // the name is misleading; consider renaming to connectedCount.
  const enabledCount = () => statuses().filter((s) => s.enabled && s.connected).length
  const totalConfigured = () => statuses().filter((s) => s.enabled).length
if (props.compact) {
return (
<div class="flex items-center gap-2 px-3 py-2 bg-gray-50 dark:bg-gray-800/50 rounded-lg">
<span class="text-xs text-gray-500">APIs:</span>
<div class="flex items-center gap-1">
<For each={statuses()}>
{(status) => (
<div
class="cursor-pointer hover:scale-110 transition-transform"
title={`${status.name}: ${getStatusText(status)}`}
onClick={() => props.onSettingsClick?.(status.id)}
>
<span class="text-sm">{status.icon}</span>
<Show when={status.enabled}>
<span
class={`inline-block w-1.5 h-1.5 rounded-full ml-0.5 ${status.connected ? "bg-green-500" : status.checking ? "bg-yellow-500" : "bg-red-500"
}`}
/>
</Show>
</div>
)}
</For>
</div>
<button
class="p-1 hover:bg-gray-200 dark:hover:bg-gray-700 rounded"
onClick={checkAllApis}
disabled={isChecking()}
title="Refresh API status"
>
<RefreshCw class={`w-3 h-3 ${isChecking() ? "animate-spin" : ""}`} />
</button>
</div>
)
}
return (
<div class="space-y-4">
<div class="flex items-center justify-between">
<div>
<h3 class="text-lg font-semibold">API Connections</h3>
<p class="text-sm text-gray-500">
{enabledCount()} of {totalConfigured()} APIs connected
</p>
</div>
<button
class="flex items-center gap-2 px-3 py-1.5 text-sm bg-blue-500 hover:bg-blue-600 text-white rounded-lg disabled:opacity-50"
onClick={checkAllApis}
disabled={isChecking()}
>
<RefreshCw class={`w-4 h-4 ${isChecking() ? "animate-spin" : ""}`} />
{isChecking() ? "Checking..." : "Refresh All"}
</button>
</div>
<div class="grid gap-3">
<For each={statuses()}>
{(status) => (
<div class="flex items-center justify-between p-3 bg-gray-50 dark:bg-gray-800/50 rounded-lg border border-gray-200 dark:border-gray-700">
<div class="flex items-center gap-3">
<span class="text-xl">{status.icon}</span>
<div>
<div class="font-medium">{status.name}</div>
<div class="text-xs text-gray-500">{getStatusText(status)}</div>
</div>
</div>
<div class="flex items-center gap-2">
{getStatusIcon(status)}
<button
class="p-1.5 hover:bg-gray-200 dark:hover:bg-gray-700 rounded"
onClick={() => props.onSettingsClick?.(status.id)}
title="Configure"
>
<Settings class="w-4 h-4" />
</button>
</div>
</div>
)}
</For>
</div>
<Show when={lastFullCheck() > 0}>
<p class="text-xs text-gray-400 text-center">
Last checked: {new Date(lastFullCheck()).toLocaleTimeString()}
</p>
</Show>
</div>
)
}
export default ApiStatusChecker

View File

@@ -4,6 +4,7 @@ import { Button } from '@suid/material'
import { Cloud, CheckCircle, XCircle, Loader } from 'lucide-solid'
import { instances } from '../../stores/instances'
import { fetchProviders } from '../../stores/session-api'
import { userFetch } from '../../lib/user-context'
interface OllamaCloudConfig {
enabled: boolean
@@ -34,7 +35,7 @@ const OllamaCloudSettings: Component = () => {
// Load config on mount
onMount(async () => {
try {
const response = await fetch('/api/ollama/config')
const response = await userFetch('/api/ollama/config')
if (response.ok) {
const data = await response.json()
const maskedKey = typeof data.config?.apiKey === "string" && /^\*+$/.test(data.config.apiKey)
@@ -62,7 +63,7 @@ const OllamaCloudSettings: Component = () => {
delete payload.apiKey
}
const response = await fetch('/api/ollama/config', {
const response = await userFetch('/api/ollama/config', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(payload)
@@ -101,7 +102,7 @@ const OllamaCloudSettings: Component = () => {
setConnectionStatus('testing')
try {
const response = await fetch('/api/ollama/test', {
const response = await userFetch('/api/ollama/test', {
method: 'POST'
})
@@ -140,7 +141,7 @@ const OllamaCloudSettings: Component = () => {
const loadModels = async () => {
setIsLoadingModels(true)
try {
const response = await fetch('/api/ollama/models')
const response = await userFetch('/api/ollama/models')
if (response.ok) {
const data = await response.json()
// Handle different response formats

View File

@@ -2,6 +2,7 @@ import { Component, createSignal, onMount, Show } from 'solid-js'
import toast from 'solid-toast'
import { Button } from '@suid/material'
import { Cpu, CheckCircle, XCircle, Loader, Key, ExternalLink } from 'lucide-solid'
import { userFetch } from '../../lib/user-context'
interface ZAIConfig {
enabled: boolean
@@ -19,7 +20,7 @@ const ZAISettings: Component = () => {
// Load config on mount
onMount(async () => {
try {
const response = await fetch('/api/zai/config')
const response = await userFetch('/api/zai/config')
if (response.ok) {
const data = await response.json()
setConfig(data.config)
@@ -37,7 +38,7 @@ const ZAISettings: Component = () => {
const saveConfig = async () => {
setIsLoading(true)
try {
const response = await fetch('/api/zai/config', {
const response = await userFetch('/api/zai/config', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(config())
@@ -66,7 +67,7 @@ const ZAISettings: Component = () => {
setConnectionStatus('testing')
try {
const response = await fetch('/api/zai/test', {
const response = await userFetch('/api/zai/test', {
method: 'POST'
})
@@ -104,7 +105,7 @@ const ZAISettings: Component = () => {
const loadModels = async () => {
try {
const response = await fetch('/api/zai/models')
const response = await userFetch('/api/zai/models')
if (response.ok) {
const data = await response.json()
setModels(data.models.map((m: any) => m.name))
@@ -186,7 +187,7 @@ const ZAISettings: Component = () => {
<label class="block font-medium mb-2">Endpoint</label>
<input
type="text"
placeholder="https://api.z.ai/api/paas/v4"
placeholder="https://api.z.ai/api"
value={config().endpoint || ''}
onChange={(e) => handleConfigChange('endpoint', e.target.value)}
class="w-full px-3 py-2 border border-gray-300 dark:border-gray-600 rounded-md focus:outline-none focus:ring-2 focus:ring-blue-500 bg-white dark:bg-gray-800"

View File

@@ -52,7 +52,7 @@ interface ToolCallProps {
instanceId: string
sessionId: string
onContentRendered?: () => void
}
}
@@ -671,6 +671,7 @@ export default function ToolCall(props: ToolCallProps) {
<Markdown
part={markdownPart}
isDark={isDark()}
instanceId={props.instanceId}
disableHighlight={disableHighlight}
onRendered={handleMarkdownRendered}
/>

View File

@@ -17,10 +17,9 @@
padding: 0;
}
html,
body {
body,
#root {
font-family: var(--font-family-sans);
font-size: var(--font-size-base);
line-height: var(--line-height-normal);
@@ -29,45 +28,11 @@ body {
-moz-osx-font-smoothing: grayscale;
background-color: var(--surface-base);
color: var(--text-primary);
margin: 0;
padding: 0;
overflow: hidden;
width: 100%;
height: 100%;
margin: 0 !important;
padding: 0 !important;
overflow: hidden !important;
width: 100vw !important;
height: 100vh !important;
display: flex !important;
flex-direction: column !important;
}
#root {
width: 100%;
height: 100%;
background-color: var(--surface-base);
overflow: hidden;
}

View File

@@ -0,0 +1,249 @@
/**
* Agent Status Detection Module
*
* Provides intelligent detection of when an agent is still "working" even after
* streaming has technically completed. This handles cases where:
* 1. Agent outputs "standby", "processing", "working" messages
* 2. Agent is in multi-step reasoning mode
* 3. Ollama models pause between thinking and output phases
*/
import type { MessageRecord } from "../stores/message-v2/types"
// Keywords that indicate the agent is still processing.
// NOTE(review): broad entries such as "let me", "i'll", "looking" can match
// ordinary prose; detection only checks the message's LAST line for these,
// and a nearby completion keyword overrides the match.
const WORKING_KEYWORDS = [
  "standby",
  "stand by",
  "processing",
  "please wait",
  "working on",
  "analyzing",
  "thinking",
  "computing",
  "calculating",
  "evaluating",
  "generating",
  "preparing",
  "loading",
  "fetching",
  "retrieving",
  "in progress",
  "one moment",
  "hold on",
  "just a sec",
  "give me a moment",
  "let me",
  "i'll",
  "i will",
  "checking",
  "scanning",
  "searching",
  "looking",
  "finding"
] as const

// Keywords that indicate the agent has finished; their presence near the
// end of a message suppresses a WORKING_KEYWORDS match.
const COMPLETION_KEYWORDS = [
  "here is",
  "here's",
  "here are",
  "done",
  "complete",
  "finished",
  "result",
  "solution",
  "answer",
  "output",
  "summary",
  "conclusion",
  "final",
  "successfully",
  "implemented",
  "fixed",
  "resolved",
  "created",
  "updated"
] as const

// Patterns that strongly indicate agent is still working. These are treated
// as high-confidence signals and are tested against the lower-cased tail of
// the message (all patterns carry /i regardless).
const WORKING_PATTERNS = [
  /stand\s*by/i,
  /processing\s*(complete\s*)?data/i,
  /please\s+wait/i,
  /working\s+on/i,
  /analyzing/i,
  /\bwait\b/i,
  /\bone\s+moment\b/i,
  /\bhold\s+on\b/i,
  /\.\.\.\s*$/, // Ends with ellipsis
  /…\s*$/, // Ends with unicode ellipsis
] as const
/**
 * Collects the plain-text content of a message by walking its parts in
 * order and joining every text part with newlines. Non-text parts and
 * parts without data are skipped.
 */
function extractMessageText(message: MessageRecord): string {
  const pieces: string[] = []
  for (const partId of message.partIds) {
    const data = message.parts[partId]?.data as Record<string, unknown> | undefined
    if (data && data.type === "text" && typeof data.text === "string") {
      pieces.push(data.text)
    }
  }
  return pieces.join("\n")
}
/**
 * Returns the trailing portion of a message's text, lower-cased and capped
 * at `charLimit` characters, for keyword/pattern scanning.
 */
function getRecentContent(message: MessageRecord, charLimit = 500): string {
  const fullText = extractMessageText(message)
  const tail = fullText.length > charLimit ? fullText.slice(-charLimit) : fullText
  return tail.toLowerCase()
}
/**
* Checks if the message content indicates the agent is still working
*/
export function detectAgentWorkingState(message: MessageRecord | null | undefined): {
isWorking: boolean
reason?: string
confidence: "high" | "medium" | "low"
} {
if (!message) {
return { isWorking: false, confidence: "high" }
}
// If message status is streaming or sending, definitely working
if (message.status === "streaming" || message.status === "sending") {
return { isWorking: true, reason: "Active streaming", confidence: "high" }
}
// Get recent content to analyze
const recentContent = getRecentContent(message)
if (!recentContent) {
return { isWorking: false, confidence: "high" }
}
// Check for working patterns with high confidence
for (const pattern of WORKING_PATTERNS) {
if (pattern.test(recentContent)) {
return {
isWorking: true,
reason: `Pattern match: ${pattern.source}`,
confidence: "high"
}
}
}
// Check if recent content ends with working keywords
const lastLine = recentContent.split("\n").pop()?.trim() || ""
for (const keyword of WORKING_KEYWORDS) {
if (lastLine.includes(keyword)) {
// Check if there's also a completion keyword nearby
const hasCompletionNearby = COMPLETION_KEYWORDS.some(ck =>
recentContent.slice(-200).includes(ck)
)
if (!hasCompletionNearby) {
return {
isWorking: true,
reason: `Working keyword: "${keyword}"`,
confidence: "medium"
}
}
}
}
// Check message age - if very recent and short, might still be working
const now = Date.now()
const messageAge = now - message.updatedAt
const contentLength = extractMessageText(message).length
// If message was updated very recently (< 2s) and content is short
if (messageAge < 2000 && contentLength < 100) {
return {
isWorking: true,
reason: "Recently updated with short content",
confidence: "low"
}
}
return { isWorking: false, confidence: "high" }
}
/**
 * Check if the last assistant message indicates agent is still conceptually
 * working, either by explicit status or by semantic content analysis.
 *
 * NOTE(review): `messages` is currently unused; kept for interface stability.
 */
export function isAgentConceptuallyThinking(
  messages: MessageRecord[],
  lastAssistantMessage: MessageRecord | null | undefined
): boolean {
  if (!lastAssistantMessage) return false

  const status = lastAssistantMessage.status
  if (status === "streaming" || status === "sending") return true

  return detectAgentWorkingState(lastAssistantMessage).isWorking
}
/**
 * Get a user-friendly status message for the current agent state.
 *
 * Returns null when the agent is idle; otherwise a short label suitable
 * for a status indicator, derived from the message status or from the
 * detection reason produced by detectAgentWorkingState().
 */
export function getAgentStatusMessage(
  message: MessageRecord | null | undefined
): string | null {
  if (!message) {
    return null
  }
  const workingState = detectAgentWorkingState(message)
  if (!workingState.isWorking) {
    return null
  }
  if (message.status === "streaming") {
    return "Streaming..."
  }
  if (message.status === "sending") {
    return "Sending..."
  }
  // Derive a label from the detection reason. Pattern-match reasons embed
  // the regex SOURCE (e.g. `stand\s*by`, `\.\.\.\s*$`), so we match loosely:
  // the previous exact checks for "stand by" and "ellipsis" could never
  // match those reason strings and were dead branches.
  const reason = (workingState.reason ?? "").toLowerCase()
  if (reason.includes("stand")) {
    // Covers keyword "standby"/"stand by" and pattern source `stand\s*by`.
    return "Agent processing..."
  }
  if (reason.includes("processing")) {
    return "Processing..."
  }
  if (reason.includes("analyzing")) {
    return "Analyzing..."
  }
  if (reason.includes("\\.\\.\\.") || reason.includes("…")) {
    // Ellipsis pattern sources: `\.\.\.\s*$` and `…\s*$`.
    return "Thinking..."
  }
  return "Working..."
}

View File

@@ -28,6 +28,7 @@ import type {
PortAvailabilityResponse,
} from "../../../server/src/api-types"
import { getLogger } from "./logger"
import { getUserHeaders } from "./user-context"
const FALLBACK_API_BASE = "http://127.0.0.1:9898"
const RUNTIME_BASE = typeof window !== "undefined" ? window.location?.origin : undefined
@@ -87,8 +88,10 @@ function logHttp(message: string, context?: Record<string, unknown>) {
async function request<T>(path: string, init?: RequestInit): Promise<T> {
const url = API_BASE_ORIGIN ? new URL(path, API_BASE_ORIGIN).toString() : path
const userHeaders = getUserHeaders()
const headers: HeadersInit = {
"Content-Type": "application/json",
...userHeaders,
...(init?.headers ?? {}),
}
@@ -117,6 +120,9 @@ async function request<T>(path: string, init?: RequestInit): Promise<T> {
export const serverApi = {
getApiBase(): string {
return API_BASE_ORIGIN
},
fetchWorkspaces(): Promise<WorkspaceDescriptor[]> {
return request<WorkspaceDescriptor[]>("/api/workspaces")
},
@@ -186,6 +192,20 @@ export const serverApi = {
body: JSON.stringify({ config }),
})
},
fetchWorkspaceMcpStatus(id: string): Promise<{
servers: Record<string, { connected: boolean }>
toolCount: number
tools: Array<{ name: string; server: string; description: string }>
}> {
return request(`/api/workspaces/${encodeURIComponent(id)}/mcp-status`)
},
connectWorkspaceMcps(id: string): Promise<{
success: boolean
servers: Record<string, { connected: boolean }>
toolCount: number
}> {
return request(`/api/workspaces/${encodeURIComponent(id)}/mcp-connect`, { method: "POST" })
},
fetchConfig(): Promise<AppConfig> {
return request<AppConfig>("/api/config/app")

View File

@@ -0,0 +1,334 @@
/**
* Lite Mode API Client - Binary-Free Mode
*
* This provides a client for working with NomadArch in Binary-Free Mode,
* using native session management instead of the OpenCode binary.
*/
import { CODENOMAD_API_BASE } from "./api-client"
import { getLogger } from "./logger"
const log = getLogger("lite-mode")

// Server-reported runtime mode and which chat providers are available.
export interface ModeInfo {
  mode: "lite" | "full"
  binaryFreeMode: boolean
  nativeSessions: boolean
  opencodeBinaryAvailable: boolean
  providers: {
    qwen: boolean
    zai: boolean
    zen: boolean
  }
}

// A chat session managed natively by the server (no OpenCode binary).
export interface NativeSession {
  id: string
  workspaceId: string
  title?: string
  parentId?: string | null
  createdAt: number
  updatedAt: number
  messageIds: string[]
  model?: {
    providerId: string
    modelId: string
  }
  agent?: string
}

// A single message within a native session.
export interface NativeMessage {
  id: string
  sessionId: string
  role: "user" | "assistant" | "system" | "tool"
  content?: string
  createdAt: number
  updatedAt: number
  status?: "pending" | "streaming" | "completed" | "error"
}

// Memoized mode info; populated on first successful getMode() call and
// reset via clearModeCache().
let modeCache: ModeInfo | null = null
/**
 * Get the current running mode (lite or full).
 *
 * The result is cached for the lifetime of the page (see clearModeCache).
 * On any fetch/parse failure a lite-mode default is returned WITHOUT being
 * cached, so a later call can retry the server.
 */
export async function getMode(): Promise<ModeInfo> {
  if (modeCache) return modeCache
  try {
    const response = await fetch(`${CODENOMAD_API_BASE}/api/meta/mode`)
    if (!response.ok) {
      throw new Error(`Failed to fetch mode: ${response.status}`)
    }
    // NOTE(review): response body is trusted as ModeInfo without validation.
    modeCache = await response.json()
    log.info(`Running in ${modeCache?.mode} mode`, { binaryFree: modeCache?.binaryFreeMode })
    return modeCache!
  } catch (error) {
    log.warn("Failed to fetch mode, assuming lite mode", error)
    // Default to lite mode if we can't determine
    return {
      mode: "lite",
      binaryFreeMode: true,
      nativeSessions: true,
      opencodeBinaryAvailable: false,
      providers: { qwen: true, zai: true, zen: true }
    }
  }
}
/**
 * Check if running in Binary-Free (lite) mode.
 *
 * Convenience wrapper over getMode(); inherits its caching behavior and
 * its lite-mode default when the server cannot be reached.
 */
export async function isLiteMode(): Promise<boolean> {
  const mode = await getMode()
  return mode.binaryFreeMode
}
/**
 * Native Session API for Binary-Free Mode.
 *
 * All endpoints live under /api/native and operate on sessions stored by
 * the server itself. Methods throw a plain Error on non-OK responses
 * unless noted otherwise; a rejected fetch (network failure) propagates.
 */
export const nativeSessionApi = {
  /** List every native session belonging to a workspace. */
  async listSessions(workspaceId: string): Promise<NativeSession[]> {
    const response = await fetch(`${CODENOMAD_API_BASE}/api/native/workspaces/${encodeURIComponent(workspaceId)}/sessions`)
    if (!response.ok) throw new Error("Failed to list sessions")
    const data = await response.json()
    return data.sessions
  },

  /** Create a new session, optionally seeded with title/parent/model/agent. */
  async createSession(workspaceId: string, options?: {
    title?: string
    parentId?: string
    model?: { providerId: string; modelId: string }
    agent?: string
  }): Promise<NativeSession> {
    const response = await fetch(`${CODENOMAD_API_BASE}/api/native/workspaces/${encodeURIComponent(workspaceId)}/sessions`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(options ?? {})
    })
    if (!response.ok) throw new Error("Failed to create session")
    const data = await response.json()
    return data.session
  },

  /** Fetch one session; resolves to null when the server reports 404. */
  async getSession(workspaceId: string, sessionId: string): Promise<NativeSession | null> {
    const response = await fetch(`${CODENOMAD_API_BASE}/api/native/workspaces/${encodeURIComponent(workspaceId)}/sessions/${encodeURIComponent(sessionId)}`)
    if (response.status === 404) return null
    if (!response.ok) throw new Error("Failed to get session")
    const data = await response.json()
    return data.session
  },

  /** PATCH session fields; resolves to null when the session is missing. */
  async updateSession(workspaceId: string, sessionId: string, updates: Partial<NativeSession>): Promise<NativeSession | null> {
    const response = await fetch(`${CODENOMAD_API_BASE}/api/native/workspaces/${encodeURIComponent(workspaceId)}/sessions/${encodeURIComponent(sessionId)}`, {
      method: "PATCH",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(updates)
    })
    if (response.status === 404) return null
    if (!response.ok) throw new Error("Failed to update session")
    const data = await response.json()
    return data.session
  },

  /** Delete a session; true on success, false otherwise (does not throw on HTTP errors). */
  async deleteSession(workspaceId: string, sessionId: string): Promise<boolean> {
    const response = await fetch(`${CODENOMAD_API_BASE}/api/native/workspaces/${encodeURIComponent(workspaceId)}/sessions/${encodeURIComponent(sessionId)}`, {
      method: "DELETE"
    })
    return response.ok || response.status === 204
  },

  /** Fork an existing session into a new one. */
  async forkSession(workspaceId: string, sessionId: string): Promise<NativeSession> {
    const response = await fetch(`${CODENOMAD_API_BASE}/api/native/workspaces/${encodeURIComponent(workspaceId)}/sessions/${encodeURIComponent(sessionId)}/fork`, {
      method: "POST"
    })
    if (!response.ok) throw new Error("Failed to fork session")
    const data = await response.json()
    return data.session
  },

  /** Revert a session, optionally back to a specific message id. */
  async revertSession(workspaceId: string, sessionId: string, messageId?: string): Promise<NativeSession> {
    const response = await fetch(`${CODENOMAD_API_BASE}/api/native/workspaces/${encodeURIComponent(workspaceId)}/sessions/${encodeURIComponent(sessionId)}/revert`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ messageId })
    })
    if (!response.ok) throw new Error("Failed to revert session")
    const data = await response.json()
    return data.session
  },

  /** Fetch all messages of a session. */
  async getMessages(workspaceId: string, sessionId: string): Promise<NativeMessage[]> {
    const response = await fetch(`${CODENOMAD_API_BASE}/api/native/workspaces/${encodeURIComponent(workspaceId)}/sessions/${encodeURIComponent(sessionId)}/messages`)
    if (!response.ok) throw new Error("Failed to get messages")
    const data = await response.json()
    return data.messages
  },

  /** Append one or more messages; returns the stored records. */
  async appendMessages(
    workspaceId: string,
    sessionId: string,
    messages: Array<{
      id?: string
      role: "user" | "assistant" | "system" | "tool"
      content?: string
      createdAt?: number
      updatedAt?: number
      status?: "pending" | "streaming" | "completed" | "error"
    }>
  ): Promise<NativeMessage[]> {
    const response = await fetch(`${CODENOMAD_API_BASE}/api/native/workspaces/${encodeURIComponent(workspaceId)}/sessions/${encodeURIComponent(sessionId)}/messages`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ messages })
    })
    if (!response.ok) throw new Error("Failed to append messages")
    const data = await response.json()
    return data.messages
  },

  /**
   * Import sessions from SDK mode to Native mode.
   * The server decides which sessions are new (imported) vs. already
   * present (skipped).
   */
  async importSessions(workspaceId: string, sessions: Array<{
    id: string
    title?: string
    parentId?: string | null
    createdAt?: number
    updatedAt?: number
    model?: { providerId: string; modelId: string }
    agent?: string
    messages?: Array<{
      id: string
      role: "user" | "assistant" | "system" | "tool"
      content?: string
      createdAt?: number
    }>
  }>): Promise<{ success: boolean; imported: number; skipped: number }> {
    const response = await fetch(`${CODENOMAD_API_BASE}/api/native/workspaces/${encodeURIComponent(workspaceId)}/sessions/import`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ sessions })
    })
    if (!response.ok) throw new Error("Failed to import sessions")
    return response.json()
  },

  /**
   * Sync sessions from SDK (OpenCode) to Native mode.
   * This reads sessions directly from OpenCode's storage on the server side.
   */
  async syncFromSdk(workspaceId: string, folderPath: string): Promise<{
    success: boolean
    imported: number
    skipped: number
    total?: number
    message?: string
  }> {
    const response = await fetch(`${CODENOMAD_API_BASE}/api/native/workspaces/${encodeURIComponent(workspaceId)}/sync-sdk`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ folderPath })
    })
    if (!response.ok) throw new Error("Failed to sync SDK sessions")
    return response.json()
  },

  /**
   * Check if SDK sessions exist for a folder.
   */
  async checkSdkSessions(folderPath: string): Promise<{
    found: boolean
    count: number
    sessions: Array<{ id: string; title: string; created: number }>
  }> {
    const response = await fetch(`${CODENOMAD_API_BASE}/api/native/check-sdk-sessions`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ folderPath })
    })
    if (!response.ok) throw new Error("Failed to check SDK sessions")
    return response.json()
  },

  /**
   * Send a prompt to the session and get a streaming response.
   *
   * Parses an SSE-style body ("data: ..." lines terminated by "[DONE]")
   * and yields content deltas as they arrive. Lines with invalid JSON are
   * skipped silently; errors are yielded as { type: "error" } items rather
   * than thrown.
   */
  async* streamPrompt(
    workspaceId: string,
    sessionId: string,
    content: string,
    options?: {
      provider?: "qwen" | "zai" | "zen"
      accessToken?: string
      resourceUrl?: string
      enableTools?: boolean
    }
  ): AsyncGenerator<{ type: "content" | "done" | "error"; data?: string }> {
    const response = await fetch(`${CODENOMAD_API_BASE}/api/native/workspaces/${encodeURIComponent(workspaceId)}/sessions/${encodeURIComponent(sessionId)}/prompt`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        content,
        provider: options?.provider ?? "qwen",
        accessToken: options?.accessToken,
        resourceUrl: options?.resourceUrl,
        enableTools: options?.enableTools ?? true
      })
    })
    if (!response.ok) {
      yield { type: "error", data: `Request failed: ${response.status}` }
      return
    }
    const reader = response.body?.getReader()
    if (!reader) {
      yield { type: "error", data: "No response body" }
      return
    }
    const decoder = new TextDecoder()
    let buffer = ""
    while (true) {
      const { done, value } = await reader.read()
      if (done) break
      buffer += decoder.decode(value, { stream: true })
      // Keep the trailing partial line in the buffer for the next chunk.
      const lines = buffer.split("\n")
      buffer = lines.pop() ?? ""
      for (const line of lines) {
        if (!line.trim()) continue
        if (line.startsWith("data: ")) {
          const data = line.slice(6)
          if (data === "[DONE]") {
            yield { type: "done" }
            return
          }
          try {
            const parsed = JSON.parse(data)
            if (parsed.error) {
              yield { type: "error", data: parsed.error }
            } else if (parsed.choices?.[0]?.delta?.content) {
              yield { type: "content", data: parsed.choices[0].delta.content }
            }
          } catch {
            // Skip invalid JSON
          }
        }
      }
    }
    yield { type: "done" }
  }
}
/**
 * Clear mode cache (for testing or after config changes).
 *
 * The next getMode() call will re-query the server.
 */
export function clearModeCache(): void {
  modeCache = null
}

View File

@@ -260,9 +260,21 @@ function setupRenderer(isDark: boolean) {
const resolvedLang = lang && lang.trim() ? lang.trim() : "text"
const escapedLang = escapeHtml(resolvedLang)
const previewButton = resolvedLang === "html" ? `
<button class="code-block-preview" data-code="${encodedCode}" data-lang="${escapedLang}">
<svg class="preview-icon" width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
<path d="M1 12s4-8 11-8 11 8 11 8-4 8-11 8-11-8-11-8z"></path>
<circle cx="12" cy="12" r="3"></circle>
</svg>
<span class="preview-text">Preview</span>
</button>` : "";
const header = `
<div class="code-block-header">
<div class="flex items-center gap-2">
<span class="code-block-language">${escapedLang}</span>
${previewButton}
</div>
<button class="code-block-copy" data-code="${encodedCode}">
<svg class="copy-icon" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2">
<rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect>

View File

@@ -20,8 +20,9 @@ function detectHost(): HostRuntime {
return "web"
}
const win = window as Window & { electronAPI?: unknown }
if (typeof win.electronAPI !== "undefined") {
// Check for common Electron injection patterns
const win = window as any
if (win.electronAPI || win.electron || win.ipcRenderer || win.process?.versions?.electron) {
return "electron"
}

View File

@@ -11,6 +11,7 @@ const DEFAULT_INSTANCE_DATA: InstanceData = {
messageHistory: [],
agentModelSelections: {},
sessionTasks: {},
sessionMessages: {},
}
function isDeepEqual(a: unknown, b: unknown): boolean {
@@ -157,11 +158,13 @@ export class ServerStorage {
const messageHistory = Array.isArray(source.messageHistory) ? [...source.messageHistory] : []
const agentModelSelections = { ...(source.agentModelSelections ?? {}) }
const sessionTasks = { ...(source.sessionTasks ?? {}) }
const sessionMessages = { ...(source.sessionMessages ?? {}) }
return {
...source,
messageHistory,
agentModelSelections,
sessionTasks,
sessionMessages,
}
}

View File

@@ -0,0 +1,157 @@
import { createSignal } from "solid-js"
import { isElectronHost } from "./runtime-env"
// localStorage key under which the active user's id is persisted
const ACTIVE_USER_KEY = "codenomad_active_user_id"

// Reactive auth state: whether a user is currently logged in, and whether
// initializeUserContext() has completed at least once (gates the UI).
const [isLoggedIn, setLoggedIn] = createSignal(false)
const [isInitialized, setInitialized] = createSignal(false)

export { isLoggedIn, setLoggedIn, isInitialized }
/**
 * Persist (or clear) the active user id and update the login signal.
 * Passing null logs the user out and removes the cached id.
 */
export function setActiveUserId(userId: string | null): void {
  if (!userId) {
    localStorage.removeItem(ACTIVE_USER_KEY)
    setLoggedIn(false)
    console.log(`[UserContext] Active user cleared`)
    return
  }
  localStorage.setItem(ACTIVE_USER_KEY, userId)
  setLoggedIn(true)
  console.log(`[UserContext] Active user set to: ${userId}`)
}
/**
 * Get the active user ID.
 *
 * Reads the persisted id from localStorage; returns null when no user has
 * been set (i.e. logged out).
 */
export function getActiveUserId(): string | null {
  return localStorage.getItem(ACTIVE_USER_KEY)
}
/**
 * Headers identifying the active user for API requests.
 * Empty object when no user is logged in.
 */
export function getUserHeaders(): Record<string, string> {
  const userId = getActiveUserId()
  return userId ? { "X-User-Id": userId } : {}
}
/**
 * Merge the active user's headers into a RequestInit, preserving any
 * headers already present. Returns `options` untouched when no user is set.
 */
export function withUserHeaders(options: RequestInit = {}): RequestInit {
  const userHeaders = Object.entries(getUserHeaders())
  if (userHeaders.length === 0) return options

  const merged = new Headers(options.headers || {})
  for (const [name, value] of userHeaders) {
    merged.set(name, value)
  }
  return {
    ...options,
    headers: merged,
  }
}
/**
 * Fetch wrapper that automatically includes user headers.
 *
 * Behaves exactly like fetch() when no user is logged in.
 */
export async function userFetch(url: string | URL | Request, options: RequestInit = {}): Promise<Response> {
  return fetch(url, withUserHeaders(options))
}
/**
* Globally patch fetch to include user headers for all internal /api/* requests
* This ensures compatibility with legacy code and 3rd party libraries.
*/
export function patchFetch(): void {
if ((window as any)._codenomad_fetch_patched) return
(window as any)._codenomad_fetch_patched = true
const originalFetch = window.fetch
window.fetch = async function (input: RequestInfo | URL, init?: RequestInit) {
let url = ""
if (typeof input === "string") {
url = input
} else if (input instanceof URL) {
url = input.toString()
} else if (input instanceof Request) {
url = input.url
}
// Only inject headers for internal API calls
if (url.startsWith("/api/") || url.includes(window.location.origin + "/api/")) {
return originalFetch(input, withUserHeaders(init))
}
return originalFetch(input, init)
}
console.log("[UserContext] Global fetch patched for /api/* requests")
}
/**
 * Initialize user context from Host (Electron/Tauri) or API.
 * Call this on app startup.
 *
 * Resolution order:
 *  1. Electron host: ask the main process for the active user via
 *     electronAPI.getActiveUser(); a missing session forces login.
 *  2. Otherwise (or if the Electron bridge is unavailable): fall back to
 *     the web-mode cache check in handleWebInit().
 * Always marks the context as initialized — even on failure — so the UI
 * can leave its loading state.
 */
export async function initializeUserContext(): Promise<void> {
  console.log(`[UserContext] Initializing... host=${isElectronHost()}`)
  try {
    if (isElectronHost()) {
      const api = (window as any).electronAPI
      if (api && api.getActiveUser) {
        console.log(`[UserContext] Requesting active user via api.getActiveUser()...`)
        const activeUser = await api.getActiveUser()
        console.log(`[UserContext] getActiveUser result:`, activeUser)
        if (activeUser?.id) {
          console.log(`[UserContext] Host has active session: ${activeUser.id}`)
          setActiveUserId(activeUser.id)
        } else {
          console.log(`[UserContext] Host has no active session. Enforcing login.`)
          setActiveUserId(null)
        }
      } else {
        // Bridge missing (e.g. preload failed): degrade to web behavior.
        console.warn(`[UserContext] electronAPI.getActiveUser not found. Falling back to web mode.`)
        await handleWebInit()
      }
    } else {
      await handleWebInit()
    }
  } catch (error) {
    // Fail closed: any unexpected error clears the session and shows login.
    console.error(`[UserContext] Critical initialization error:`, error)
    setActiveUserId(null)
  } finally {
    setInitialized(true)
  }
}
/**
 * Web-mode initialization: trust a cached user id from localStorage when
 * present and flip the login signal accordingly. No server-side validation
 * is performed yet (see inline strategy notes).
 */
async function handleWebInit() {
  console.log(`[UserContext] Web init - checking local cache...`)
  const existingId = getActiveUserId()
  // In "Mandatory Login" mode, we might want to clear this on every fresh load
  // but for now let's see if the server validates it.
  if (existingId) {
    // We could verify this ID with the server here if we had a /api/users/me endpoint
    // For now, let's keep it but mark it as "unverified" or just let the first API fail
    console.log(`[UserContext] Found cached ID: ${existingId}. Validating session...`)
    // Strategy: We want mandatory login. If this is a fresh launch, we should probably clear it.
    // For Electron it's already cleared in main.ts. For Web it's tricky.
    // Let's lean towards SECURITY: if no one explicitly logged in THIS RUN, show login.
    // Actually, if we are in Electron and we hit this, it's because IPC failed.
    // If we are in Web, we trust it for now but we'll see.
    setLoggedIn(true)
  } else {
    console.log(`[UserContext] No cached ID found.`)
    setLoggedIn(false)
  }
}

View File

@@ -1,9 +1,13 @@
import { render } from "solid-js/web"
import { Show, onMount } from "solid-js"
import App from "./App"
import { ThemeProvider } from "./lib/theme"
import { ConfigProvider } from "./stores/preferences"
import { InstanceConfigProvider } from "./stores/instance-config"
import { runtimeEnv } from "./lib/runtime-env"
import LoginView from "./components/auth/LoginView"
import { isLoggedIn, initializeUserContext, patchFetch, isInitialized } from "./lib/user-context"
import { Toaster } from "solid-toast"
import "./index.css"
import "@git-diff-view/solid/styles/diff-view-pure.css"
@@ -18,8 +22,30 @@ if (typeof document !== "undefined") {
document.documentElement.dataset.runtimePlatform = runtimeEnv.platform
}
render(
() => (
const Root = () => {
onMount(() => {
patchFetch()
initializeUserContext()
})
return (
<>
<Toaster
position="top-right"
gutter={8}
toastOptions={{
style: {
background: "#1a1a1a",
color: "#fff",
border: "1px solid rgba(255,255,255,0.1)",
},
}}
/>
<Show when={isInitialized()}>
<Show
when={isLoggedIn()}
fallback={<LoginView onLoginSuccess={() => initializeUserContext()} />}
>
<ConfigProvider>
<InstanceConfigProvider>
<ThemeProvider>
@@ -27,6 +53,10 @@ render(
</ThemeProvider>
</InstanceConfigProvider>
</ConfigProvider>
),
root,
)
</Show>
</Show>
</>
)
}
render(() => <Root />, root)

View File

@@ -202,7 +202,7 @@ function LoadingApp() {
<img src={iconUrl} alt="NomadArch" class="loading-logo" width="180" height="180" />
<div class="loading-heading">
<h1 class="loading-title">NomadArch 1.0</h1>
<p class="loading-subtitle" style={{ "font-size": '14px', "color": '#666', "margin-top": '4px' }}>A fork of OpenCode</p>
<p class="loading-subtitle" style={{ "font-size": '14px', "color": '#666', "margin-top": '4px' }}>An enhanced fork of CodeNomad</p>
<Show when={status()}>{(statusText) => <p class="loading-status">{statusText()}</p>}</Show>
</div>
<div class="loading-card">

View File

@@ -0,0 +1,160 @@
/**
* Compaction Service
*
* Integrates opencode-style compaction logic for managing context window:
* - Detect token overflow
* - Prune old tool outputs (keep last 40k tokens protected)
* - Track compacted parts with timestamps
*/
import { getLogger } from "@/lib/logger";
const log = getLogger("compaction-service");

// Configuration from opencode
export const PRUNE_MINIMUM = 20_000; // Minimum tokens to prune before triggering
export const PRUNE_PROTECT = 40_000; // Protect last N tokens of tool outputs
export const PRUNE_PROTECTED_TOOLS = ["skill"]; // Tools to never prune

// Context-window dimensions of the active model.
export interface CompactionConfig {
  contextLimit: number;
  outputMax: number;
}

// Token usage counters for a conversation.
export interface TokenInfo {
  input: number;
  output: number;
  cacheRead?: number;
  cacheWrite?: number;
}

// Outcome of a prune pass.
export interface PruneResult {
  pruned: number;
  total: number;
  partsCount: number;
}
/**
 * Check if context is overflowing and compaction is needed.
 *
 * Usable space is the context limit minus a reserved output budget of
 * min(outputMax, 32000), falling back to 32000 when outputMax is 0.
 * A zero context limit means "unknown" and never overflows.
 */
export function isOverflow(tokens: TokenInfo, model: CompactionConfig): boolean {
  if (model.contextLimit === 0) return false;
  const reservedOutput = Math.min(model.outputMax, 32000) || 32000;
  const usable = model.contextLimit - reservedOutput;
  const used = tokens.input + (tokens.cacheRead || 0) + tokens.output;
  return used > usable;
}
/**
 * Calculate how many tokens to prune from old tool outputs.
 *
 * Walks outputs newest-to-oldest, protecting the most recent
 * `protectThreshold` tokens; every older output is marked for pruning.
 *
 * @returns indices to prune (in discovery order, newest-first) and the
 *          total token count they represent.
 */
export function calculatePruneTarget(
  toolOutputTokens: number[],
  protectThreshold: number = PRUNE_PROTECT
): { toPrune: number[]; prunedTotal: number } {
  const toPrune: number[] = [];
  let runningTotal = 0;
  let prunedTotal = 0;

  for (let idx = toolOutputTokens.length - 1; idx >= 0; idx--) {
    runningTotal += toolOutputTokens[idx];
    // Everything beyond the protected budget gets pruned.
    if (runningTotal > protectThreshold) {
      prunedTotal += toolOutputTokens[idx];
      toPrune.push(idx);
    }
  }
  return { toPrune, prunedTotal };
}
/**
 * Estimate token count from text: roughly 1 token per 4 characters,
 * rounded up. Empty/missing text counts as 0.
 */
export function estimateTokens(text: string): number {
  return text ? Math.ceil(text.length / 4) : 0;
}
/**
 * Generate default compaction prompt.
 *
 * Sent to the model verbatim when summarizing a session for a fresh
 * context; the wording is part of the compaction contract.
 */
export function getDefaultCompactionPrompt(): string {
  return `Provide a detailed prompt for continuing our conversation above. Focus on information that would be helpful for continuing the conversation, including what we did, what we're doing, which files we're working on, and what we're going to do next considering new session will not have access to our conversation.`;
}
/**
 * Check if a tool should be protected from pruning
 * (exact match against PRUNE_PROTECTED_TOOLS).
 */
export function isProtectedTool(toolName: string): boolean {
  return PRUNE_PROTECTED_TOOLS.indexOf(toolName) !== -1;
}
/**
 * Calculate context usage percentage (rounded to the nearest integer).
 * A zero context limit yields 0 rather than dividing by zero.
 */
export function getContextUsagePercent(tokens: TokenInfo, contextLimit: number): number {
  if (contextLimit === 0) return 0;
  const consumed = tokens.input + (tokens.cacheRead || 0) + tokens.output;
  return Math.round((consumed / contextLimit) * 100);
}
/**
 * Get compaction recommendation.
 *
 * Thresholds: >= 90% usage requires compaction (high urgency), >= 75%
 * recommends it (medium), >= 50% is informational only (low), below that
 * no reason is reported.
 */
export function getCompactionRecommendation(
  tokens: TokenInfo,
  model: CompactionConfig
): { shouldCompact: boolean; reason: string; urgency: "low" | "medium" | "high" } {
  const usagePercent = getContextUsagePercent(tokens, model.contextLimit);

  if (usagePercent >= 90) {
    return {
      shouldCompact: true,
      reason: `Context ${usagePercent}% full - compaction required`,
      urgency: "high",
    };
  }
  if (usagePercent >= 75) {
    return {
      shouldCompact: true,
      reason: `Context ${usagePercent}% full - compaction recommended`,
      urgency: "medium",
    };
  }
  if (usagePercent >= 50) {
    return {
      shouldCompact: false,
      reason: `Context ${usagePercent}% full`,
      urgency: "low",
    };
  }
  return { shouldCompact: false, reason: "", urgency: "low" };
}
// Aggregate default export mirroring the named exports, for consumers that
// prefer `import compaction from "./service"`.
export default {
  isOverflow,
  calculatePruneTarget,
  estimateTokens,
  getDefaultCompactionPrompt,
  isProtectedTool,
  getContextUsagePercent,
  getCompactionRecommendation,
  PRUNE_MINIMUM,
  PRUNE_PROTECT,
  PRUNE_PROTECTED_TOOLS,
};

View File

@@ -0,0 +1,20 @@
// Compaction Service Exports
//
// NOTE(review): this barrel re-exports `getCompactionPrompt` and
// `compactMessages` from "./service"; verify the service module actually
// defines both — one variant of the service exposes
// `getDefaultCompactionPrompt` instead, and `compactMessages` is not
// visible in it. A missing name here is a compile error for every importer.
export {
  isOverflow,
  calculatePruneTarget,
  estimateTokens,
  getCompactionPrompt,
  isProtectedTool,
  getContextUsagePercent,
  getCompactionRecommendation,
  compactMessages,
  PRUNE_MINIMUM,
  PRUNE_PROTECT,
  PRUNE_PROTECTED_TOOLS,
} from "./service";
export type {
  CompactionConfig,
  TokenInfo,
  PruneResult,
} from "./service";

View File

@@ -0,0 +1,216 @@
/**
* Compaction Service
*
* Source: https://github.com/sst/opencode.git
* Source: https://github.com/MiniMax-AI/Mini-Agent.git
*
* Implements intelligent context management:
* - Detect token overflow
* - Prune old tool outputs (keep last 40k tokens protected)
* - Generate summaries for compacted content
*/
import { getLogger } from "@/lib/logger";
const log = getLogger("compaction-service");

// Configuration from OpenCode
export const PRUNE_MINIMUM = 20_000; // Minimum tokens before pruning
export const PRUNE_PROTECT = 40_000; // Protect last N tokens
export const PRUNE_PROTECTED_TOOLS = ["skill", "execute"]; // Never prune these

// Context-window dimensions of the active model.
export interface CompactionConfig {
  contextLimit: number;
  outputMax: number;
}

// Token usage counters for a conversation.
export interface TokenInfo {
  input: number;
  output: number;
  cacheRead?: number;
  cacheWrite?: number;
}

// Outcome of a prune pass.
export interface PruneResult {
  pruned: number;
  total: number;
  partsCount: number;
}
/**
 * Check if context is overflowing and compaction is needed.
 *
 * Reserves min(outputMax, 32000) tokens for output (falling back to 32000
 * when outputMax is 0) and compares current usage against what remains.
 * A zero context limit never overflows.
 */
export function isOverflow(tokens: TokenInfo, model: CompactionConfig): boolean {
  const limit = model.contextLimit;
  if (limit === 0) return false;
  const used = tokens.input + (tokens.cacheRead || 0) + tokens.output;
  const reserved = Math.min(model.outputMax, 32000) || 32000;
  return used > limit - reserved;
}
/**
 * Calculate how many tokens to prune from old tool outputs.
 *
 * Scans newest-to-oldest; outputs falling inside the trailing
 * `protectThreshold` token budget are kept, everything older is marked.
 */
export function calculatePruneTarget(
  toolOutputTokens: number[],
  protectThreshold: number = PRUNE_PROTECT
): { toPrune: number[]; prunedTotal: number } {
  const toPrune: number[] = [];
  let seen = 0;
  let prunedTotal = 0;
  for (let i = toolOutputTokens.length - 1; i >= 0; i--) {
    const tokens = toolOutputTokens[i];
    seen += tokens;
    if (seen > protectThreshold) {
      prunedTotal += tokens;
      toPrune.push(i);
    }
  }
  return { toPrune, prunedTotal };
}
/**
 * Estimate token count from text (rough approximation).
 *
 * Uses the common heuristic of one token per four characters, rounded up;
 * empty or missing text estimates to zero.
 */
export function estimateTokens(text: string): number {
  return text ? Math.ceil(text.length / 4) : 0;
}
/**
 * Generate the compaction summary prompt.
 *
 * The returned text is appended verbatim as a final user message when
 * asking the model to summarize older conversation history (see
 * compactMessages), so its wording is part of runtime behavior.
 */
export function getCompactionPrompt(): string {
  return `Provide a detailed summary for continuing this conversation. Focus on:
1. What we accomplished so far
2. Which files we're working on
3. Current state and any pending tasks
4. Important decisions made
5. What we're doing next
Be concise but comprehensive. The new session will not have access to the full conversation history.`;
}
/**
 * Check if a tool should be protected from pruning.
 *
 * A tool is protected when its (case-insensitive) name contains any of the
 * substrings listed in PRUNE_PROTECTED_TOOLS.
 */
export function isProtectedTool(toolName: string): boolean {
  const lowered = toolName.toLowerCase();
  for (const keyword of PRUNE_PROTECTED_TOOLS) {
    if (lowered.includes(keyword)) {
      return true;
    }
  }
  return false;
}
/**
 * Calculate context usage percentage.
 *
 * Counts input, cached-read, and output tokens against the context limit,
 * rounded to the nearest whole percent. A zero limit yields 0.
 */
export function getContextUsagePercent(tokens: TokenInfo, contextLimit: number): number {
  if (contextLimit === 0) {
    return 0;
  }
  const consumed = tokens.input + (tokens.cacheRead || 0) + tokens.output;
  return Math.round((consumed / contextLimit) * 100);
}
/**
 * Get compaction recommendation.
 *
 * Maps context usage to an advisory: >=90% requires compaction (high),
 * >=75% recommends it (medium), >=50% is informational (low), and below
 * that no reason is given.
 */
export function getCompactionRecommendation(
  tokens: TokenInfo,
  model: CompactionConfig
): { shouldCompact: boolean; reason: string; urgency: "low" | "medium" | "high" } {
  // Inlined usage computation (same math as getContextUsagePercent).
  const limit = model.contextLimit;
  const consumed = tokens.input + (tokens.cacheRead || 0) + tokens.output;
  const usagePercent = limit === 0 ? 0 : Math.round((consumed / limit) * 100);
  if (usagePercent >= 90) {
    return {
      shouldCompact: true,
      reason: `Context ${usagePercent}% full - compaction required`,
      urgency: "high"
    };
  }
  if (usagePercent >= 75) {
    return {
      shouldCompact: true,
      reason: `Context ${usagePercent}% full - compaction recommended`,
      urgency: "medium"
    };
  }
  if (usagePercent >= 50) {
    return {
      shouldCompact: false,
      reason: `Context ${usagePercent}% full`,
      urgency: "low"
    };
  }
  return {
    shouldCompact: false,
    reason: "",
    urgency: "low"
  };
}
/**
 * Compact messages by summarizing old ones.
 *
 * Conversations shorter than 10 messages are left untouched. Otherwise the
 * older half is sent to the local chat endpoint with the compaction prompt
 * appended as a final user turn; the model's reply becomes the summary.
 * Any failure is logged and reported as "nothing removed".
 *
 * @param messages full conversation history, oldest first
 * @param instanceId caller-supplied id (currently unused by this function)
 * @returns the generated summary and how many messages it replaces
 */
export async function compactMessages(
  messages: { role: string; content: string }[],
  instanceId: string
): Promise<{ summary: string; removedCount: number }> {
  // Too little history to be worth summarizing.
  if (messages.length < 10) {
    return { summary: "", removedCount: 0 };
  }
  const half = Math.floor(messages.length / 2);
  const olderHalf = messages.slice(0, half);
  log.info("Compacting messages", { total: messages.length, summarizing: half });
  try {
    const payload = {
      model: "minimax-m1",
      messages: [
        ...olderHalf,
        { role: "user", content: getCompactionPrompt() }
      ],
      stream: false
    };
    const res = await fetch("/api/ollama/chat", {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify(payload)
    });
    if (!res.ok) {
      throw new Error(`Compaction API failed: ${res.status}`);
    }
    const data = await res.json();
    return { summary: data?.message?.content || "", removedCount: half };
  } catch (error) {
    log.error("Compaction failed", error);
    return { summary: "", removedCount: 0 };
  }
}
// Default export mirrors the named exports as a single namespace object,
// for consumers that prefer `import compaction from "./service"`.
export default {
  isOverflow,
  calculatePruneTarget,
  estimateTokens,
  getCompactionPrompt,
  isProtectedTool,
  getContextUsagePercent,
  getCompactionRecommendation,
  compactMessages,
  PRUNE_MINIMUM,
  PRUNE_PROTECT,
  PRUNE_PROTECTED_TOOLS,
};

View File

@@ -0,0 +1,13 @@
// Context Engine Exports
// Barrel file: re-exports the public surface of ./service so callers can
// import from the package root without reaching into internals.
export {
  ContextEngineService,
  getContextEngine,
  initializeContextEngine,
} from "./service";
// Type-only re-exports (erased at runtime).
export type {
  ContextEngineConfig,
  RetrievedContext,
  ContextSource,
  IndexStats,
} from "./service";

View File

@@ -0,0 +1,201 @@
/**
* Context Engine Service
*
* Source: https://github.com/Eskapeum/Context-Engine
*
* Provides intelligent context retrieval for enhanced AI responses:
* - File indexing and caching
* - Semantic search across codebase
* - Q&A memory for persistent knowledge
*/
import { getLogger } from "@/lib/logger";
const log = getLogger("context-engine");
/** Construction options for the context engine. */
export interface ContextEngineConfig {
  // Root directory of the project to index.
  projectRoot: string;
  // Feature toggles — currently unused by the stub implementation below.
  enableIndexing?: boolean;
  enableVectors?: boolean;
}
/** Result of a context retrieval query. */
export interface RetrievedContext {
  // Concatenated context text returned to the caller.
  content: string;
  // Where each piece of context came from (empty in the stub).
  sources: ContextSource[];
  // Relevance of the result set (always 0 in the stub).
  relevanceScore: number;
}
/** Provenance record for a piece of retrieved context. */
export interface ContextSource {
  file: string;
  line?: number;
  symbol?: string;
  type: "code" | "documentation" | "memory" | "qa";
}
/** Snapshot of indexing progress. */
export interface IndexStats {
  filesIndexed: number;
  symbolsFound: number;
  // Epoch milliseconds of the last index run; 0 if never run.
  lastUpdated: number;
}
// Singleton instance
// Lazily populated by getContextEngine(); module-level so all callers share
// one engine per process.
let instance: ContextEngineService | null = null;
export class ContextEngineService {
  private config: ContextEngineConfig;
  private initialized: boolean = false;
  private stats: IndexStats = { filesIndexed: 0, symbolsFound: 0, lastUpdated: 0 };
  // In-memory Q&A store, keyed by a generated id.
  private memoryCache: Map<string, { question: string; answer: string; timestamp: number }> = new Map();
  // Monotonic counter appended to memory ids so two remember() calls within
  // the same millisecond cannot collide (Date.now() alone is not unique).
  private memorySeq: number = 0;
  constructor(config: ContextEngineConfig) {
    this.config = config;
  }
  /**
   * Initialize the context engine. Idempotent — repeat calls are no-ops.
   */
  async initialize(): Promise<void> {
    if (this.initialized) return;
    log.info("Context Engine initializing...", { projectRoot: this.config.projectRoot });
    // In a full implementation, this would:
    // 1. Scan the project directory
    // 2. Build Tree-sitter AST for each file
    // 3. Create embeddings for semantic search
    this.initialized = true;
    log.info("Context Engine initialized");
  }
  /**
   * Retrieve relevant context for a query.
   *
   * Stub behavior: initializes on first use, then returns only Q&A memory
   * matches joined by blank lines, with no sources and a 0 relevance score.
   * `options.maxTokens` is accepted but currently ignored.
   */
  async retrieve(query: string, options?: { maxTokens?: number }): Promise<RetrievedContext> {
    if (!this.initialized) {
      await this.initialize();
    }
    log.info("Retrieving context for query", { query: query.substring(0, 50) });
    // Search memory cache first
    const memorySuggestions = this.searchMemory(query);
    // In a full implementation, this would:
    // 1. Vectorize the query
    // 2. Search the index for relevant files/symbols
    // 3. Rank results by relevance
    // 4. Return top matches within token budget
    return {
      content: memorySuggestions.join("\n\n"),
      sources: [],
      relevanceScore: 0
    };
  }
  /**
   * Index or re-index the project.
   *
   * Stub behavior: resets counters to zero and stamps lastUpdated; no files
   * are actually walked yet.
   */
  async index(options?: { force?: boolean }): Promise<IndexStats> {
    log.info("Indexing project...", { force: options?.force });
    // In a full implementation, this would:
    // 1. Walk the file tree
    // 2. Parse each file with Tree-sitter
    // 3. Extract symbols and documentation
    // 4. Generate embeddings
    this.stats = {
      filesIndexed: 0,
      symbolsFound: 0,
      lastUpdated: Date.now()
    };
    return this.stats;
  }
  /**
   * Get current index stats.
   */
  getStats(): IndexStats {
    return this.stats;
  }
  /**
   * Add a question/answer pair to Q&A memory.
   */
  async remember(question: string, answer: string): Promise<void> {
    // FIX: previously the id was `qa_${Date.now()}` alone, so two entries
    // stored in the same millisecond silently overwrote each other in the
    // Map. The per-instance sequence number makes every id unique.
    const id = `qa_${Date.now()}_${this.memorySeq++}`;
    this.memoryCache.set(id, {
      question,
      answer,
      timestamp: Date.now()
    });
    log.info("Remembered Q&A", { question: question.substring(0, 50) });
  }
  /**
   * Search Q&A memory. Case-insensitive substring match against both the
   * question and the answer; returns at most 5 formatted "Q:/A:" strings.
   */
  searchMemory(query: string): string[] {
    const results: string[] = [];
    const queryLower = query.toLowerCase();
    for (const [, entry] of this.memoryCache) {
      if (entry.question.toLowerCase().includes(queryLower) ||
          entry.answer.toLowerCase().includes(queryLower)) {
        results.push(`Q: ${entry.question}\nA: ${entry.answer}`);
      }
    }
    return results.slice(0, 5);
  }
  /**
   * Search Q&A memory (async version). Same matching as searchMemory but
   * returns structured pairs, capped at 10, and logs the lookup.
   */
  async recall(query: string): Promise<{ question: string; answer: string }[]> {
    log.info("Recalling from memory", { query: query.substring(0, 50) });
    const results: { question: string; answer: string }[] = [];
    const queryLower = query.toLowerCase();
    for (const [, entry] of this.memoryCache) {
      if (entry.question.toLowerCase().includes(queryLower) ||
          entry.answer.toLowerCase().includes(queryLower)) {
        results.push({ question: entry.question, answer: entry.answer });
      }
    }
    return results.slice(0, 10);
  }
}
/**
 * Get or create context engine instance.
 *
 * Lazily builds the module-level singleton on the first call that supplies
 * a config; later calls ignore `config` and return the existing instance.
 *
 * @throws Error when no instance exists and no config was provided
 */
export function getContextEngine(config?: ContextEngineConfig): ContextEngineService {
  if (instance === null) {
    if (!config) {
      throw new Error("Context engine not initialized. Provide config on first call.");
    }
    instance = new ContextEngineService(config);
  }
  return instance;
}
/**
 * Initialize context engine for a workspace.
 *
 * Convenience wrapper: resolves (or creates) the singleton for the given
 * project root and ensures it has been initialized before returning it.
 */
export async function initializeContextEngine(projectRoot: string): Promise<ContextEngineService> {
  const engine = getContextEngine({ projectRoot });
  await engine.initialize();
  return engine;
}
// Default export mirrors the named exports as a single namespace object,
// for consumers that prefer `import contextEngine from "./service"`.
export default {
  ContextEngineService,
  getContextEngine,
  initializeContextEngine,
};

Some files were not shown because too many files have changed in this diff Show More