Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .env.example
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@ DEEPSEEK_API_KEY=your-deepseek-api-key-here

# For a homelab server: if your Ollama instance is running on another machine, change the following to your Ollama IP, e.g. http://192.168.x.x:12345
OLLAMA_BASE_URL=http://host.docker.internal:11434
NEXT_PUBLIC_OLLAMA_BASE_URL=http://host.docker.internal:11434


# AI Configuration
# The system uses LLM-based semantic extraction for intelligent, context-aware skill matching
Expand Down
7 changes: 4 additions & 3 deletions src/components/settings/AiSettings.tsx
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
"use client";
const OLLAMA = process.env.NEXT_PUBLIC_OLLAMA_BASE_URL || "http://localhost:11434";
import { useEffect, useState } from "react";
import { Card, CardContent, CardHeader, CardTitle } from "../ui/card";
import {
Expand Down Expand Up @@ -118,7 +119,7 @@ function AiSettings() {
setIsLoadingModels(true);
setFetchError("");
try {
const response = await fetch("http://localhost:11434/api/tags");
const response = await fetch(`${OLLAMA}/api/tags`);
if (!response.ok) {
if (selectedModel.provider === AiProvider.OLLAMA) {
setFetchError(
Expand Down Expand Up @@ -148,7 +149,7 @@ function AiSettings() {
const keepModelAlive = async (modelName: string) => {
try {
// Send a request to keep the model loaded for 1 hour
await fetch("http://localhost:11434/api/generate", {
await fetch(`${OLLAMA}/api/generate`, {
method: "POST",
headers: {
"Content-Type": "application/json",
Expand All @@ -169,7 +170,7 @@ function AiSettings() {
setRunningModelError("");
setRunningModelName("");
try {
const response = await fetch("http://localhost:11434/api/ps");
const response = await fetch(`${OLLAMA}/api/ps`);
if (!response.ok) {
if (selectedModel.provider === AiProvider.OLLAMA) {
setRunningModelError(
Expand Down
8 changes: 6 additions & 2 deletions src/utils/ai.utils.ts
Original file line number Diff line number Diff line change
@@ -1,3 +1,7 @@
const OLLAMA =
process.env.OLLAMA_BASE_URL ||
process.env.NEXT_PUBLIC_OLLAMA_BASE_URL ||
"http://localhost:11434";
import { JobResponse } from "@/models/job.model";
import { AiProvider } from "@/models/ai.model";

Expand Down Expand Up @@ -35,7 +39,7 @@ export const checkIfModelIsRunning = async (

try {
// Check if Ollama service is accessible
const response = await fetch("http://localhost:11434/api/ps", {
const response = await fetch(`${OLLAMA}/api/ps`, {
signal: AbortSignal.timeout(5000), // 5 second timeout
});

Expand Down Expand Up @@ -86,7 +90,7 @@ export const fetchRunningModels = async (): Promise<{
error?: string;
}> => {
try {
const response = await fetch("http://localhost:11434/api/ps", {
const response = await fetch(`${OLLAMA}/api/ps`, {
signal: AbortSignal.timeout(5000),
});

Expand Down