-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathlocal_ai.py
More file actions
555 lines (461 loc) · 17.1 KB
/
local_ai.py
File metadata and controls
555 lines (461 loc) · 17.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
"""
Local AI Support for Coding Agent
Optimized for Apple Silicon M3/M4 chips using MLX, llama.cpp, or Ollama.
"""
import asyncio
import json
import os
import platform
import subprocess
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, List, Dict, Any, Generator
import aiohttp
from config import config, LocalAIConfig, MODELS_DIR, CACHE_DIR
@dataclass
class LocalModelInfo:
    """Information about a local model."""
    # Human-readable display name, e.g. "CodeLlama 7B".
    name: str
    # Weight-file name; LocalAIManager resolves it relative to MODELS_DIR.
    path: str
    # Approximate on-disk size in gigabytes (used for capacity hints).
    size_gb: float
    # Backend key expected by LocalAIManager: "mlx", "llama.cpp", or "ollama".
    backend: str
    # GGUF quantization level of the weight file.
    quantization: str = "Q4_K_M"
    # Maximum context window, in tokens.
    context_size: int = 4096
# Popular coding models optimized for local inference.
# Keys are short aliases accepted by LocalAIManager.load_model(); each value
# describes the expected local weight file under MODELS_DIR.
# NOTE(review): every entry pairs a .gguf file (llama.cpp's format) with
# backend="mlx", while MLX normally consumes HuggingFace/MLX-format weights —
# confirm the intended backend/format pairing.
RECOMMENDED_MODELS = {
    "codellama-7b": LocalModelInfo(
        name="CodeLlama 7B",
        path="codellama-7b-instruct.Q4_K_M.gguf",
        size_gb=4.1,
        backend="mlx",
        quantization="Q4_K_M",
        context_size=4096
    ),
    "codellama-13b": LocalModelInfo(
        name="CodeLlama 13B",
        path="codellama-13b-instruct.Q4_K_M.gguf",
        size_gb=7.9,
        backend="mlx",
        quantization="Q4_K_M",
        context_size=4096
    ),
    "deepseek-coder-6.7b": LocalModelInfo(
        name="DeepSeek Coder 6.7B",
        path="deepseek-coder-6.7b-instruct.Q4_K_M.gguf",
        size_gb=4.0,
        backend="mlx",
        quantization="Q4_K_M",
        context_size=8192
    ),
    "qwen2.5-coder-7b": LocalModelInfo(
        name="Qwen2.5 Coder 7B",
        path="qwen2.5-coder-7b-instruct.Q4_K_M.gguf",
        size_gb=4.5,
        backend="mlx",
        quantization="Q4_K_M",
        context_size=8192
    ),
    "starcoder2-7b": LocalModelInfo(
        name="StarCoder2 7B",
        path="starcoder2-7b-instruct.Q4_K_M.gguf",
        size_gb=4.2,
        backend="mlx",
        quantization="Q4_K_M",
        context_size=4096
    ),
}
class BaseLocalBackend(ABC):
    """Common contract that every local inference backend implements."""

    def __init__(self, config: LocalAIConfig):
        # Backend settings (model name, context size, threads, ...).
        self.config = config
        # Flipped to True by a successful load_model() call.
        self.model_loaded = False

    @abstractmethod
    async def load_model(self, model_path: str) -> bool:
        """Bring a model into memory; return True on success."""
        ...

    @abstractmethod
    async def generate(self, prompt: str, max_tokens: int = 2048,
                       temperature: float = 0.7, stream: bool = False) -> str:
        """Produce a completion for *prompt* with the loaded model."""
        ...

    @abstractmethod
    async def unload_model(self):
        """Release whatever model is currently resident."""
        ...

    @abstractmethod
    def get_memory_usage(self) -> Dict[str, float]:
        """Report the backend's (estimated) memory consumption."""
        ...
class MLXBackend(BaseLocalBackend):
    """
    MLX Backend for Apple Silicon.
    MLX is Apple's machine learning framework optimized for M1/M2/M3/M4 chips.
    """

    def __init__(self, local_config: Optional[LocalAIConfig] = None):
        # Fall back to the application-wide local-AI settings when no
        # explicit config is supplied.
        cfg = local_config or config.local_ai
        super().__init__(cfg)
        self.model = None      # mlx_lm model object once loaded
        self.tokenizer = None  # matching tokenizer
        self._mlx_available = self._check_mlx()

    def _check_mlx(self) -> bool:
        """Return True if both ``mlx`` and ``mlx_lm`` import cleanly."""
        try:
            import mlx.core as mx  # noqa: F401
            import mlx_lm          # noqa: F401
            return True
        except ImportError:
            return False

    async def load_model(self, model_path: str) -> bool:
        """
        Load a model using MLX.

        Args:
            model_path: Local filesystem path or HuggingFace model ID;
                ``mlx_lm.load`` accepts both forms directly.

        Returns:
            True if the model was loaded.

        Raises:
            RuntimeError: If MLX is not installed or loading fails.
        """
        if not self._mlx_available:
            raise RuntimeError("MLX not available. Install with: pip install mlx mlx-lm")
        try:
            from mlx_lm import load
            # The previous version branched on os.path.exists(model_path),
            # but both branches called load(model_path) identically —
            # mlx_lm.load resolves local paths and HuggingFace IDs itself.
            self.model, self.tokenizer = load(model_path)
            self.model_loaded = True
            return True
        except Exception as e:
            raise RuntimeError(f"Failed to load model with MLX: {e}") from e

    async def generate(self, prompt: str, max_tokens: int = 2048,
                       temperature: float = 0.7, stream: bool = False) -> str:
        """
        Generate text using MLX.

        Note: ``stream`` is accepted for interface compatibility but is
        ignored; the full completion is returned in one piece.
        """
        if not self.model_loaded:
            raise RuntimeError("No model loaded")
        try:
            from mlx_lm import generate
            # NOTE(review): `temp=` matches older mlx_lm releases; newer
            # versions moved sampling options into a sampler object —
            # confirm against the pinned mlx-lm version.
            response = generate(
                self.model,
                self.tokenizer,
                prompt=prompt,
                max_tokens=max_tokens,
                temp=temperature,
                verbose=False
            )
            return response
        except Exception as e:
            raise RuntimeError(f"MLX generation failed: {e}") from e

    async def unload_model(self):
        """Drop model references and force a GC pass to release memory."""
        self.model = None
        self.tokenizer = None
        self.model_loaded = False
        # Force garbage collection so the weights are actually freed.
        import gc
        gc.collect()

    def get_memory_usage(self) -> Dict[str, float]:
        """
        Estimate MLX memory usage.

        MLX exposes no direct per-model memory counter here, so this is a
        flat estimate keyed on whether a model is resident.
        """
        try:
            import mlx.core as mx  # noqa: F401  # availability check only
            return {
                "estimated_gb": 4.0 if self.model_loaded else 0.0,
                "backend": "mlx"
            }
        except ImportError:  # narrowed from bare `except:` — only the import can fail
            return {"estimated_gb": 0.0, "backend": "mlx"}
class LlamaCppBackend(BaseLocalBackend):
    """
    Backend built on llama.cpp for efficient CPU/GPU inference.

    Usable on every platform, Apple Silicon included (via Metal offload).
    """

    def __init__(self, local_config: Optional[LocalAIConfig] = None):
        effective = local_config or config.local_ai
        super().__init__(effective)
        self.llm = None  # llama_cpp.Llama instance once loaded
        self._llama_cpp_available = self._check_llama_cpp()

    def _check_llama_cpp(self) -> bool:
        """Report whether the llama-cpp-python bindings can be imported."""
        try:
            from llama_cpp import Llama  # noqa: F401
        except ImportError:
            return False
        return True

    async def load_model(self, model_path: str) -> bool:
        """Instantiate a llama.cpp model from a GGUF file on disk."""
        if not self._llama_cpp_available:
            raise RuntimeError("llama-cpp-python not available. Install with: pip install llama-cpp-python")
        try:
            from llama_cpp import Llama
            gpu_layers = self.config.gpu_layers
            # -1 means auto-detect: on Apple Silicon macOS, push every
            # layer onto the Metal GPU.
            if gpu_layers == -1:
                on_apple_silicon = (platform.processor() == 'arm'
                                    and platform.system() == 'Darwin')
                if on_apple_silicon:
                    gpu_layers = 99
            self.llm = Llama(
                model_path=model_path,
                n_ctx=self.config.context_size,
                n_threads=self.config.threads,
                n_gpu_layers=gpu_layers,
                verbose=False
            )
            self.model_loaded = True
            return True
        except Exception as e:
            raise RuntimeError(f"Failed to load model with llama.cpp: {e}")

    async def generate(self, prompt: str, max_tokens: int = 2048,
                       temperature: float = 0.7, stream: bool = False) -> str:
        """Run a completion through the loaded llama.cpp model."""
        if not (self.model_loaded and self.llm):
            raise RuntimeError("No model loaded")
        try:
            completion = self.llm(
                prompt,
                max_tokens=max_tokens,
                temperature=temperature,
                stop=["</s>", "[/INST]", "```\n\n"],
                echo=False
            )
            return completion["choices"][0]["text"]
        except Exception as e:
            raise RuntimeError(f"llama.cpp generation failed: {e}")

    async def unload_model(self):
        """Drop the model reference and trigger garbage collection."""
        self.llm = None
        self.model_loaded = False
        import gc
        gc.collect()

    def get_memory_usage(self) -> Dict[str, float]:
        """Rough memory estimate; llama.cpp exposes no exact counter here."""
        if self.model_loaded:
            # Rough KV-cache estimate: ~4 bytes/cell across ~32 layers.
            kv_cache_gb = (self.config.context_size * 4 * 32) / (1024 ** 3)
            return {
                "estimated_gb": 4.0 + kv_cache_gb,
                "backend": "llama.cpp"
            }
        return {"estimated_gb": 0.0, "backend": "llama.cpp"}
class OllamaBackend(BaseLocalBackend):
    """
    Ollama Backend for easy local model management.
    Ollama handles model downloading and serving automatically.
    """

    def __init__(self, local_config: Optional[LocalAIConfig] = None):
        cfg = local_config or config.local_ai
        super().__init__(cfg)
        # Default Ollama server endpoint.
        self.base_url = "http://localhost:11434"
        self.current_model = None  # name of the model last pulled/loaded

    async def _check_ollama_running(self) -> bool:
        """Return True if the Ollama HTTP API answers within 5 seconds."""
        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(f"{self.base_url}/api/tags", timeout=aiohttp.ClientTimeout(total=5)) as response:
                    return response.status == 200
        except Exception:
            # Narrowed from bare `except:`, which would also swallow
            # asyncio.CancelledError and break task cancellation.
            return False

    async def _start_ollama(self) -> bool:
        """Attempt to start the Ollama server in the background."""
        try:
            subprocess.Popen(
                ["ollama", "serve"],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL
            )
            await asyncio.sleep(2)  # Give the server a moment to come up
            return await self._check_ollama_running()
        except Exception:
            # e.g. FileNotFoundError when the `ollama` binary is missing;
            # best-effort start, so report failure rather than raise.
            return False

    async def load_model(self, model_name: str) -> bool:
        """
        Load/pull a model through Ollama, starting the server if needed.

        Raises:
            RuntimeError: If the server cannot be reached/started or the
                pull request fails.
        """
        if not await self._check_ollama_running():
            if not await self._start_ollama():
                raise RuntimeError("Ollama not running. Start with: ollama serve")
        try:
            # Pull the model if it is not already available locally.
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    f"{self.base_url}/api/pull",
                    json={"name": model_name},
                    timeout=aiohttp.ClientTimeout(total=600)  # 10 min for download
                ) as response:
                    if response.status != 200:
                        raise RuntimeError(f"Failed to pull model: {await response.text()}")
            self.current_model = model_name
            self.model_loaded = True
            return True
        except Exception as e:
            raise RuntimeError(f"Failed to load model with Ollama: {e}") from e

    async def generate(self, prompt: str, max_tokens: int = 2048,
                       temperature: float = 0.7, stream: bool = False) -> str:
        """Generate text using Ollama's /api/generate endpoint."""
        if not self.model_loaded:
            raise RuntimeError("No model loaded")
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    f"{self.base_url}/api/generate",
                    json={
                        "model": self.current_model,
                        "prompt": prompt,
                        "stream": False,
                        "options": {
                            "num_predict": max_tokens,
                            "temperature": temperature
                        }
                    },
                    timeout=aiohttp.ClientTimeout(total=120)
                ) as response:
                    # Fail loudly on a server error instead of handing a
                    # JSON parse failure to the caller.
                    if response.status != 200:
                        raise RuntimeError(f"Ollama returned HTTP {response.status}")
                    data = await response.json()
                    return data.get("response", "")
        except Exception as e:
            raise RuntimeError(f"Ollama generation failed: {e}") from e

    async def unload_model(self):
        """Unload model (Ollama manages this automatically)."""
        self.current_model = None
        self.model_loaded = False

    def get_memory_usage(self) -> Dict[str, float]:
        """Get Ollama memory usage (flat estimate; server owns the model)."""
        return {
            "estimated_gb": 4.0 if self.model_loaded else 0.0,
            "backend": "ollama",
            "model": self.current_model
        }
class LocalAIManager:
    """
    Manages local AI inference on Apple Silicon and other platforms.
    Automatically selects the best backend for the current hardware.
    """

    def __init__(self):
        self.backend: Optional[BaseLocalBackend] = None
        self.current_model: Optional[str] = None
        self._initialize_backend()

    def _initialize_backend(self):
        """Initialize the best available backend.

        Leaves ``self.backend`` as None when local AI is disabled in config.
        """
        local_config = config.local_ai
        if not local_config.enabled:
            return
        backend_name = local_config.backend.lower()
        if backend_name == "mlx":
            self.backend = MLXBackend(local_config)
        elif backend_name in ("llama.cpp", "llamacpp"):  # membership test over == chain
            self.backend = LlamaCppBackend(local_config)
        elif backend_name == "ollama":
            self.backend = OllamaBackend(local_config)
        else:
            # Auto-detect the best backend for this hardware.
            if config.is_apple_silicon():
                # Prefer MLX on Apple Silicon; fall back to Ollama if the
                # MLX backend cannot even be constructed.
                try:
                    self.backend = MLXBackend(local_config)
                except Exception:  # narrowed from bare `except:`
                    self.backend = OllamaBackend(local_config)
            else:
                self.backend = OllamaBackend(local_config)

    async def load_model(self, model_name: Optional[str] = None) -> bool:
        """
        Load a local model.

        Args:
            model_name: Model name/path. If None, uses config default.
                Recommended-model aliases (keys of RECOMMENDED_MODELS)
                are resolved to weight files under MODELS_DIR.

        Returns:
            True if loaded successfully

        Raises:
            RuntimeError: If no backend is available or a recommended
                model's weights are missing locally.
        """
        if not self.backend:
            raise RuntimeError("No backend available")
        model = model_name or config.local_ai.model_name
        # Resolve recommended-model aliases to their local weight files.
        if model in RECOMMENDED_MODELS:
            model_info = RECOMMENDED_MODELS[model]
            model_path = MODELS_DIR / model_info.path
            if not model_path.exists():
                raise RuntimeError(
                    f"Model not found: {model_path}\n"
                    f"Download from HuggingFace or use Ollama: ollama pull {model}"
                )
            model = str(model_path)
        success = await self.backend.load_model(model)
        if success:
            self.current_model = model
        return success

    async def generate(self, prompt: str, max_tokens: int = 2048,
                       temperature: float = 0.7) -> str:
        """Generate text using the local model."""
        if not self.backend or not self.backend.model_loaded:
            raise RuntimeError("No model loaded. Call load_model() first.")
        return await self.backend.generate(prompt, max_tokens, temperature)

    async def generate_code_fix(self, code: str, error: str,
                                context: Optional[str] = None) -> str:
        """Generate a code fix using the local model."""
        prompt = f"""<|system|>
You are an expert software engineer. Fix the code error. Return only the fixed code.
<|end|>
<|user|>
Fix this code:
ERROR: {error}
CODE:
```
{code}
```
{f'CONTEXT: {context}' if context else ''}
<|end|>
<|assistant|>
"""
        return await self.generate(prompt)

    async def analyze_code(self, code: str, analysis_type: str = "review") -> str:
        """Analyze code using the local model."""
        prompt = f"""<|system|>
You are an expert code reviewer. Analyze the code and provide actionable feedback.
<|end|>
<|user|>
Analyze this code for {analysis_type}:
```
{code}
```
Provide:
1. Issues found
2. Suggestions for improvement
3. Optimized code if applicable
<|end|>
<|assistant|>
"""
        return await self.generate(prompt)

    async def unload(self):
        """Unload the current model."""
        if self.backend:
            await self.backend.unload_model()
        self.current_model = None

    def get_status(self) -> Dict[str, Any]:
        """Get current status of local AI."""
        return {
            "enabled": config.local_ai.enabled,
            "backend": config.local_ai.backend,
            "model_loaded": self.backend.model_loaded if self.backend else False,
            "current_model": self.current_model,
            "memory_usage": self.backend.get_memory_usage() if self.backend else {},
            "is_apple_silicon": config.is_apple_silicon()
        }

    @staticmethod
    def get_recommended_models() -> Dict[str, LocalModelInfo]:
        """Get list of recommended models for coding."""
        return RECOMMENDED_MODELS

    @staticmethod
    async def download_model(model_name: str, backend: str = "ollama") -> bool:
        """
        Download a model for local use.

        Args:
            model_name: Name of the model to download
            backend: Backend to use for downloading (ollama recommended)

        Returns:
            True if download successful
        """
        if backend == "ollama":
            try:
                process = await asyncio.create_subprocess_exec(
                    "ollama", "pull", model_name,
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.PIPE
                )
                await process.communicate()
                return process.returncode == 0
            except Exception:
                # Best-effort: a missing `ollama` binary (OSError) simply
                # reports failure.  A bare `except:` here would also
                # swallow CancelledError and break task cancellation.
                return False
        return False
# Global local AI manager instance.
# Constructed at import time; _initialize_backend() only builds a backend
# object when config.local_ai.enabled is True, so importing this module is
# cheap when local AI is disabled.
local_ai_manager = LocalAIManager()