Skip to content

Commit 4d69316

Browse files
committed
feat(adapters): add local model adapter for Ollama, LMStudio, llama.cpp
- Single adapter supporting all OpenAI-compatible local servers - Auto-detect: Ollama (:11434), LMStudio (:1234), llama.cpp (:8080) - Model aliases: gemma4, llama3, mistral, qwen, deepseek, phi, starcoder - Provider auto-inference from model name (name:tag format → local) - Smart error handling: model not found → list available models - Tested with Ollama gemma4:e4b (15.9s) and LMStudio gemma-4-e4b-it (11.3s)
1 parent e966fa5 commit 4d69316

File tree

6 files changed

+349
-4
lines changed

6 files changed

+349
-4
lines changed

src/adapters/index.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,17 +18,20 @@ export { spawnCli } from './base.js';
1818
export { ClaudeCliAdapter } from './claude.js';
1919
export { CodexCliAdapter } from './codex.js';
2020
export { GptCliAdapter } from './gpt.js';
21+
export { LocalModelAdapter } from './local.js';
2122
export { registerProcess, getProcess, getAllProcesses, killProcess, startHealthChecker, stopHealthChecker } from './processRegistry.js';
2223

2324
import { ClaudeCliAdapter } from './claude.js';
2425
import { CodexCliAdapter } from './codex.js';
2526
import { GptCliAdapter } from './gpt.js';
27+
import { LocalModelAdapter } from './local.js';
2628
import type { AdapterName, CliAdapter } from './types.js';
2729

2830
const adapters: Record<string, CliAdapter> = {
2931
claude: new ClaudeCliAdapter(),
3032
codex: new CodexCliAdapter(),
3133
gpt: new GptCliAdapter(),
34+
local: new LocalModelAdapter(),
3235
};
3336

3437
let defaultAdapter: AdapterName = 'claude';

src/adapters/local.ts

Lines changed: 325 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,325 @@
1+
// ============================================
2+
// OpenSwarm - Local Model Adapter
3+
// Created: 2026-04-10
4+
// Purpose: Ollama, LMStudio, llama.cpp 등 로컬 OpenAI 호환 서버 지원
5+
// ============================================
6+
7+
import type {
8+
CliAdapter,
9+
CliRunOptions,
10+
CliRunResult,
11+
AdapterCapabilities,
12+
WorkerResult,
13+
ReviewResult,
14+
} from './types.js';
15+
import { t } from '../locale/index.js';
16+
17+
// Candidate base URLs for local providers, probed in priority order.
const DEFAULT_ENDPOINTS = [
  'http://localhost:11434', // Ollama
  'http://localhost:1234',  // LMStudio
  'http://localhost:8080',  // llama.cpp server
];

// Model used when the caller does not specify one.
const DEFAULT_MODEL = 'gemma3:4b';
// Timeout for each GET /v1/models health probe.
const HEALTH_CHECK_TIMEOUT_MS = 2000;
26+
27+
export class LocalModelAdapter implements CliAdapter {
28+
readonly name = 'local';
29+
30+
readonly capabilities: AdapterCapabilities = {
31+
supportsStreaming: false,
32+
supportsJsonOutput: true,
33+
supportsModelSelection: true,
34+
managedGit: false,
35+
supportedSkills: [],
36+
};
37+
38+
// 활성 서버 URL (isAvailable에서 감지, run에서 사용)
39+
private activeUrl: string | null = null;
40+
private configuredUrl: string | null = null;
41+
42+
/** config.yaml에서 baseUrl을 주입받을 때 사용 */
43+
setBaseUrl(url: string): void {
44+
this.configuredUrl = url;
45+
}
46+
47+
async isAvailable(): Promise<boolean> {
48+
const candidates = this.configuredUrl
49+
? [this.configuredUrl, ...DEFAULT_ENDPOINTS]
50+
: DEFAULT_ENDPOINTS;
51+
52+
for (const url of candidates) {
53+
try {
54+
const res = await fetch(`${url}/v1/models`, {
55+
signal: AbortSignal.timeout(HEALTH_CHECK_TIMEOUT_MS),
56+
});
57+
if (res.ok) {
58+
this.activeUrl = url;
59+
return true;
60+
}
61+
} catch {
62+
// 서버 미실행 — 다음 후보로
63+
}
64+
}
65+
return false;
66+
}
67+
68+
/** 현재 활성 서버 URL 반환 (디버깅용) */
69+
getActiveUrl(): string | null {
70+
return this.activeUrl;
71+
}
72+
73+
/** 사용 가능한 모델 목록 조회 */
74+
async listModels(): Promise<string[]> {
75+
if (!this.activeUrl) {
76+
const available = await this.isAvailable();
77+
if (!available) return [];
78+
}
79+
80+
try {
81+
const res = await fetch(`${this.activeUrl}/v1/models`, {
82+
signal: AbortSignal.timeout(HEALTH_CHECK_TIMEOUT_MS),
83+
});
84+
if (!res.ok) return [];
85+
86+
const data = (await res.json()) as { data?: Array<{ id: string }> };
87+
return data.data?.map(m => m.id) ?? [];
88+
} catch {
89+
return [];
90+
}
91+
}
92+
93+
buildCommand(_options: CliRunOptions): { command: string; args: string[] } {
94+
return { command: 'echo', args: ['"Local adapter uses run() — not shell spawn"'] };
95+
}
96+
97+
async run(options: CliRunOptions): Promise<CliRunResult> {
98+
const startTime = Date.now();
99+
100+
// 서버 연결 확인
101+
if (!this.activeUrl) {
102+
const available = await this.isAvailable();
103+
if (!available) {
104+
return {
105+
exitCode: 1,
106+
stdout: '',
107+
stderr: 'No local model server found. Start Ollama, LMStudio, or llama.cpp server first.\n' +
108+
`Checked: ${(this.configuredUrl ? [this.configuredUrl, ...DEFAULT_ENDPOINTS] : DEFAULT_ENDPOINTS).join(', ')}`,
109+
durationMs: Date.now() - startTime,
110+
};
111+
}
112+
}
113+
114+
const model = options.model ?? DEFAULT_MODEL;
115+
const body = {
116+
model,
117+
messages: [
118+
{ role: 'user' as const, content: options.prompt },
119+
],
120+
temperature: 0.2,
121+
stream: false,
122+
};
123+
124+
try {
125+
const res = await fetch(`${this.activeUrl}/v1/chat/completions`, {
126+
method: 'POST',
127+
headers: { 'Content-Type': 'application/json' },
128+
body: JSON.stringify(body),
129+
signal: options.timeoutMs
130+
? AbortSignal.timeout(options.timeoutMs)
131+
: undefined,
132+
});
133+
134+
const durationMs = Date.now() - startTime;
135+
136+
if (!res.ok) {
137+
const errText = await res.text().catch(() => '');
138+
139+
// 모델 없음 에러 시 사용 가능한 모델 목록 안내
140+
if (res.status === 404 || errText.includes('not found')) {
141+
const models = await this.listModels();
142+
const modelList = models.length > 0
143+
? `Available: ${models.slice(0, 10).join(', ')}`
144+
: 'No models loaded';
145+
return {
146+
exitCode: 1,
147+
stdout: '',
148+
stderr: `Model "${model}" not found on ${this.activeUrl}. ${modelList}`,
149+
durationMs,
150+
};
151+
}
152+
153+
return {
154+
exitCode: 1,
155+
stdout: '',
156+
stderr: `Local API error (${res.status}): ${errText.slice(0, 500)}`,
157+
durationMs,
158+
};
159+
}
160+
161+
const data = (await res.json()) as OpenAICompatResponse;
162+
const content = data.choices?.[0]?.message?.content ?? '';
163+
164+
if (options.onLog) {
165+
options.onLog(content.slice(0, 300));
166+
}
167+
168+
return {
169+
exitCode: 0,
170+
stdout: content,
171+
stderr: '',
172+
durationMs: Date.now() - startTime,
173+
};
174+
} catch (err) {
175+
// 타임아웃 또는 네트워크 에러
176+
const message = err instanceof Error ? err.message : String(err);
177+
const isTimeout = message.includes('abort') || message.includes('timeout');
178+
179+
return {
180+
exitCode: 1,
181+
stdout: '',
182+
stderr: isTimeout
183+
? `Local model timeout after ${options.timeoutMs ?? 300000}ms (model: ${model}). Local models can be slow — consider increasing timeout.`
184+
: `Local model request failed: ${message}`,
185+
durationMs: Date.now() - startTime,
186+
};
187+
}
188+
}
189+
190+
parseWorkerOutput(raw: CliRunResult): WorkerResult {
191+
const text = raw.stdout;
192+
return extractWorkerResultJson(text) ?? extractWorkerFromText(text);
193+
}
194+
195+
parseReviewerOutput(raw: CliRunResult): ReviewResult {
196+
const text = raw.stdout;
197+
return extractReviewerResultJson(text) ?? extractReviewerFromText(text);
198+
}
199+
}
200+
201+
// Shape of an OpenAI-compatible /v1/chat/completions response body.
interface OpenAICompatResponse {
  // Completion candidates; run() reads choices[0].message.content.
  choices: Array<{
    message: {
      content: string;
      role: string;
    };
    finish_reason: string;
  }>;
  // Token accounting; some local servers omit it.
  usage?: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
  // Model id echoed back by the server; optional on some implementations.
  model?: string;
}
217+
218+
// Worker/Reviewer 출력 파싱 (GPT 어댑터와 동일 로직)
219+
220+
function extractWorkerResultJson(text: string): WorkerResult | null {
221+
const jsonMatch = text.match(/```json\s*([\s\S]*?)\s*```/);
222+
const jsonStr = jsonMatch?.[1] ?? findJsonObject(text, '"success"');
223+
if (!jsonStr) return null;
224+
225+
try {
226+
const parsed = JSON.parse(jsonStr);
227+
return {
228+
success: Boolean(parsed.success),
229+
summary: parsed.summary || t('common.fallback.noSummary'),
230+
filesChanged: Array.isArray(parsed.filesChanged) ? parsed.filesChanged : [],
231+
commands: Array.isArray(parsed.commands) ? parsed.commands : [],
232+
output: text,
233+
error: parsed.error,
234+
confidencePercent: typeof parsed.confidencePercent === 'number'
235+
? parsed.confidencePercent : undefined,
236+
haltReason: parsed.haltReason || undefined,
237+
};
238+
} catch {
239+
return null;
240+
}
241+
}
242+
243+
function extractWorkerFromText(text: string): WorkerResult {
244+
const hasError = /error|fail|exception|cannot/i.test(text);
245+
const hasSuccess = /success|completed|done|finished/i.test(text);
246+
247+
return {
248+
success: !hasError || hasSuccess,
249+
summary: extractSummary(text),
250+
filesChanged: [],
251+
commands: [],
252+
output: text,
253+
error: hasError ? extractErrorMessage(text) : undefined,
254+
};
255+
}
256+
257+
function extractReviewerResultJson(text: string): ReviewResult | null {
258+
const jsonMatch = text.match(/```json\s*([\s\S]*?)\s*```/);
259+
const jsonStr = jsonMatch?.[1] ?? findJsonObject(text, '"decision"');
260+
if (!jsonStr) return null;
261+
262+
try {
263+
const parsed = JSON.parse(jsonStr);
264+
const decision = parsed.decision === 'approve' || parsed.decision === 'reject'
265+
? parsed.decision
266+
: 'revise';
267+
return {
268+
decision,
269+
feedback: typeof parsed.feedback === 'string' ? parsed.feedback : t('common.fallback.noSummary'),
270+
issues: Array.isArray(parsed.issues)
271+
? parsed.issues.filter((v: unknown): v is string => typeof v === 'string')
272+
: [],
273+
suggestions: Array.isArray(parsed.suggestions)
274+
? parsed.suggestions.filter((v: unknown): v is string => typeof v === 'string')
275+
: [],
276+
};
277+
} catch {
278+
return null;
279+
}
280+
}
281+
282+
function extractReviewerFromText(text: string): ReviewResult {
283+
const lower = text.toLowerCase();
284+
const decision = lower.includes('approve')
285+
? 'approve'
286+
: lower.includes('reject')
287+
? 'reject'
288+
: 'revise';
289+
return {
290+
decision,
291+
feedback: extractSummary(text),
292+
issues: [],
293+
suggestions: [],
294+
};
295+
}
296+
297+
function findJsonObject(text: string, marker: string): string | null {
298+
const idx = text.indexOf(marker);
299+
if (idx < 0) return null;
300+
let start = text.lastIndexOf('{', idx);
301+
if (start < 0) return null;
302+
let depth = 0;
303+
for (let i = start; i < text.length; i++) {
304+
if (text[i] === '{') depth++;
305+
if (text[i] === '}') {
306+
depth--;
307+
if (depth === 0) return text.slice(start, i + 1);
308+
}
309+
}
310+
return null;
311+
}
312+
313+
function extractSummary(text: string): string {
314+
const lines = text.split('\n').filter(l => l.trim().length > 10);
315+
if (lines.length === 0) return t('common.fallback.noSummary');
316+
const summary = lines[0].trim();
317+
return summary.length > 200 ? `${summary.slice(0, 200)}...` : summary;
318+
}
319+
320+
function extractErrorMessage(text: string): string {
321+
const errorMatch = text.match(/(?:error|exception|failed?):\s*(.+)/i);
322+
if (errorMatch) return errorMatch[1].slice(0, 200);
323+
const lines = text.split('\n').filter(l => /error|fail/i.test(l));
324+
return lines.length > 0 ? lines[0].slice(0, 200) : 'Unknown error';
325+
}

src/adapters/types.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@ import type { WorkerResult, ReviewResult } from '../agents/agentPair.js';
88
// Re-export for convenience
99
export type { WorkerResult, ReviewResult };
1010

11-
export type AdapterName = 'claude' | 'codex' | 'gpt';
11+
export type AdapterName = 'claude' | 'codex' | 'gpt' | 'local';
1212

1313
/**
1414
* Raw result from a CLI process execution

src/automation/runnerTypes.ts

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ import type { ExecutorResult } from '../orchestration/workflow.js';
77
import type { DefaultRolesConfig, ProjectAgentConfig, JobProfile } from '../core/types.js';
88

99
export interface AutonomousConfig {
10-
defaultAdapter?: 'claude' | 'codex' | 'gpt';
10+
defaultAdapter?: 'claude' | 'codex' | 'gpt' | 'local';
1111
linearTeamId: string;
1212
allowedProjects: string[];
1313
heartbeatSchedule: string;

src/core/types.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -92,7 +92,7 @@ export type SwarmEvent = {
9292
*/
9393
export type SwarmConfig = {
9494
/** Default CLI adapter */
95-
adapter?: 'claude' | 'codex' | 'gpt';
95+
adapter?: 'claude' | 'codex' | 'gpt' | 'local';
9696
/** UI language: 'en' | 'ko' (default: 'en') */
9797
language: 'en' | 'ko';
9898
/** Discord bot token */
@@ -256,7 +256,7 @@ export type RoleConfig = {
256256
/** Whether role is enabled */
257257
enabled: boolean;
258258
/** CLI adapter name */
259-
adapter?: 'claude' | 'codex' | 'gpt';
259+
adapter?: 'claude' | 'codex' | 'gpt' | 'local';
260260
/** Model ID */
261261
model: string;
262262
/** Timeout (ms), 0 = unlimited */

0 commit comments

Comments
 (0)