Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .env
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
GEMINI_API_KEY=Enter Key
3 changes: 3 additions & 0 deletions .idea/.gitignore

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

14 changes: 14 additions & 0 deletions .idea/agentic-gemini-ai-react.iml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 8 additions & 0 deletions .idea/googol.iml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

65 changes: 65 additions & 0 deletions .idea/inspectionProfiles/Project_Default.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 6 additions & 0 deletions .idea/inspectionProfiles/profiles_settings.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

4 changes: 4 additions & 0 deletions .idea/misc.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

8 changes: 8 additions & 0 deletions .idea/modules.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 6 additions & 0 deletions .idea/vcs.xml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

21 changes: 21 additions & 0 deletions docs/diagrams/archetecture.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
┌──────────────┐
│ TCIA API │
└──────┬───────┘
Async Batch Ingestion
┌──────▼───────┐
│ DICOM FS │ ← src/data/
└──────┬───────┘
Metadata Extraction (no pixels)
┌──────▼───────┐
│ Vector DB │ ← FAISS / Chroma
└──────┬───────┘
Retrieval-Augmented Context
┌──────▼───────┐
│ Gemini LLM │ ← ontology reasoning
└──────────────┘
Empty file added eval/__init__.py
Empty file.
21 changes: 21 additions & 0 deletions eval/eval_runner.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
from typing import List


class EvalCase:
    """A single evaluation case: a prompt plus the keywords expected in the answer."""

    def __init__(self, prompt: str, expected_keywords: List[str]):
        # The text sent to the agent under test.
        self.prompt = prompt
        # Substrings whose presence in the response earns one point each.
        self.expected_keywords = expected_keywords


class Evaluator:
    """Scores an agent against a list of EvalCase items by keyword overlap."""

    def run(self, agent, cases: List[EvalCase]):
        """Call *agent* on each case's prompt and count matching keywords.

        agent: a callable mapping a prompt string to a response string.
        Returns a list of dicts holding the prompt, the keyword-hit score,
        and the raw response.
        """
        results = []
        for case in cases:
            response = agent(case.prompt)
            lowered = response.lower()
            # Lowercase BOTH sides: the original compared mixed-case keywords
            # against a lowercased response, so a keyword like "CHEST" could
            # never match.
            score = sum(1 for k in case.expected_keywords if k.lower() in lowered)
            results.append({
                "prompt": case.prompt,
                "score": score,
                "response": response
            })
        return results
16 changes: 16 additions & 0 deletions eval/evaluate_rag.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# eval/evaluate_rag.py

import asyncio

# (query, expected-metadata-substring) pairs used as a smoke-test benchmark.
QUERIES = [
    ("lung CT", "CHEST"),
    ("brain MRI", "HEAD")
]

async def evaluate(store):
    """Return the fraction of QUERIES whose top hit contains the expected tag.

    store: any object exposing an async ``similarity_search(query)`` that
    returns a list of documents with a ``.metadata`` attribute.
    """
    score = 0
    for query, expected in QUERIES:
        results = await store.similarity_search(query)
        # Guard against an empty hit list: the original indexed results[0]
        # unconditionally and raised IndexError when a query had no matches.
        if results and expected in str(results[0].metadata):
            score += 1
    return score / len(QUERIES)
28 changes: 0 additions & 28 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,28 +0,0 @@
# Core Dependencies
fastapi==0.115.6
uvicorn[standard]==0.34.0
streamlit==1.41.1
python-multipart==0.0.20

# Google AI
google-generativeai==0.8.3
google-cloud-aiplatform==1.75.0

# Image Processing
Pillow==11.0.0
opencv-python==4.10.0.84

# Data Handling
pydantic==2.10.5
pydantic-settings==2.7.0
python-dotenv==1.0.1

# Utilities
aiofiles==24.1.0
httpx==0.28.1

# Development
pytest==8.3.4
pytest-asyncio==0.24.0
black==24.10.0
flake8==7.1.1
Binary file added src/agent/__pycache__/__init__.cpython-312.pyc
Binary file not shown.
Binary file added src/agent/__pycache__/agent.cpython-312.pyc
Binary file not shown.
Binary file added src/agent/__pycache__/executor.cpython-312.pyc
Binary file not shown.
Binary file added src/agent/__pycache__/planner.cpython-312.pyc
Binary file not shown.
14 changes: 14 additions & 0 deletions src/agent/agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,14 @@
from core.planner import Planner
from core.executor import Executor
from core.memory import VectorMemory

class Agent:
    """Facade that wires the planner, executor, and vector memory together."""

    def __init__(self):
        self.planner = Planner()
        self.executor = Executor()
        self.memory = VectorMemory()

    def run(self, user_input):
        """Plan for *user_input*, execute the plan, and return (answer, trace)."""
        steps = self.planner.create_plan(user_input)
        answer, trace = self.executor.execute(steps, self.memory, user_input)
        return answer, trace
9 changes: 9 additions & 0 deletions src/agent/coordinator.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
# src/dicom_agent/agents/coordinator.py
from pipeline.async_ingest import ingest_collection
from tools.dicom_ontology_llm_tool import explain_metadata


async def run_pipeline(collection: str, store):
    """Ingest *collection* into *store*, then explain the top 'lung CT' hit's metadata."""
    await ingest_collection(collection, store)
    hits = await store.similarity_search("lung CT")
    top_hit = hits[0]
    return await explain_metadata(top_hit.metadata)
28 changes: 28 additions & 0 deletions src/agent/executor.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# agents/executor.py
class ExecutorAgent:
    """Runs a planner-produced step list, threading outputs between steps."""

    def __init__(self, tools: dict):
        # Mapping of tool name -> tool object exposing .run(**kwargs).
        self.tools = tools
        # Accumulated outputs from executed steps, consumable by later steps.
        self.context = {}

    def resolve_args(self, args: dict):
        """Return *args* with "$previous.<key>" placeholders replaced from context."""
        def _materialize(value):
            if isinstance(value, str) and value.startswith("$previous."):
                return self.context.get(value.replace("$previous.", ""))
            return value

        return {name: _materialize(value) for name, value in args.items()}

    def execute(self, plan: dict):
        """Run every step in plan["steps"] in order; return the final context."""
        for step in plan["steps"]:
            tool = self.tools[step["tool"]]
            output = tool.run(**self.resolve_args(step["args"]))
            # Save outputs for downstream steps
            self.context.update(output)
        return self.context
42 changes: 42 additions & 0 deletions src/agent/planner.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
# agents/planner.py
class PlannerAgent:
    """Produces a fixed four-step ingestion/annotation plan for a goal."""

    def plan(self, goal: str) -> dict:
        """
        Convert a goal into a structured execution plan
        """
        # In production, this is an LLM call
        download_step = {
            "tool": "download_files",
            "args": {
                "url": "https://example.com/dicom",
                "output_dir": "data",
            },
        }
        metadata_step = {
            "tool": "extract_metadata",
            # "$previous.*" placeholders are resolved by the executor from
            # the outputs of earlier steps.
            "args": {"files": "$previous.files"},
        }
        index_step = {
            "tool": "create_vector_store",
            "args": {"metadata": "$previous.metadata"},
        }
        dicom_step = {
            "tool": "image_to_dicom",
            "args": {
                "image_path": "annotated_output.png",
                "output_dcm": "annotated_output.dcm",
                "patient_name": "DOE^JANE",
                "patient_id": "A12345",
                "study_description": "AI Annotated Image",
                "modality": "OT",
            },
        }
        return {"steps": [download_step, metadata_step, index_step, dicom_step]}
15 changes: 15 additions & 0 deletions src/annotate_main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
# main.py
from tools.image_to_dicom_tool import ImageToDICOMTool


def main():
    """Convert input.jpg into output.dcm via ImageToDICOMTool."""
    tool = ImageToDICOMTool()

    # The return value was previously bound to an unused local `result`;
    # call the tool purely for its side effect (writing output.dcm).
    tool.run(
        image_path="input.jpg",
        output_dcm="output.dcm",
        study_description="AI Generated Image"
    )


if __name__ == "__main__":
    main()
Empty file added src/core/__init__.py
Empty file.
Binary file added src/core/__pycache__/__init__.cpython-312.pyc
Binary file not shown.
Binary file added src/core/__pycache__/executor.cpython-312.pyc
Binary file not shown.
Binary file added src/core/__pycache__/memory.cpython-312.pyc
Binary file not shown.
Binary file added src/core/__pycache__/planner.cpython-312.pyc
Binary file not shown.
36 changes: 36 additions & 0 deletions src/core/executor.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
from tools.gemini_client import GeminiClient
from tools.search_tool import SearchTool
from utils.logger import logger

class Executor:
    """Runs a ReAct-style plan: search actions first, then one LLM call."""

    def __init__(self):
        self.llm = GeminiClient()
        self.search = SearchTool()

    def execute(self, plan, memory, user_input):
        """Execute *plan* for *user_input*; return (answer, reasoning trace)."""
        trace = []
        context = memory.retrieve(user_input)

        for step in plan:
            trace.append(f"Thought: {step['thought']}")
            if step["action"] != "search":
                continue
            observation = self.search.search(user_input)
            trace.append(f"Observation: {observation}")
            memory.store(observation)

        # NOTE: the continuation lines of this prompt are intentionally
        # unindented — they are part of the literal sent to the LLM.
        prompt = f"""You are a ReAct-style agent.

Context:
{context}

Reasoning:
{chr(10).join(trace)}

Question:
{user_input}

Provide final answer.
"""
        answer = self.llm.generate(prompt)
        memory.store(answer)
        return answer, trace
20 changes: 20 additions & 0 deletions src/core/memory.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
import faiss
from sentence_transformers import SentenceTransformer

class VectorMemory:
    """Flat-L2 FAISS index over sentence-transformer embeddings of stored texts."""

    def __init__(self):
        # all-MiniLM-L6-v2 produces 384-dimensional embeddings, matching the index.
        self.model = SentenceTransformer("all-MiniLM-L6-v2")
        self.index = faiss.IndexFlatL2(384)
        # texts[i] is the original string for index row i.
        self.texts = []

    def store(self, text):
        """Embed *text* and append it to the index and the text list."""
        emb = self.model.encode([text])
        self.index.add(emb)
        self.texts.append(text)

    def retrieve(self, query, k=3):
        """Return up to *k* nearest stored texts, newline-joined ("" if empty)."""
        if self.index.ntotal == 0:
            return ""
        emb = self.model.encode([query])
        # Clamp k and drop -1 padding ids: FAISS pads with -1 when fewer than
        # k vectors exist, and texts[-1] would silently return the LAST text.
        k = min(k, self.index.ntotal)
        _, idx = self.index.search(emb, k)
        return "\n".join(self.texts[i] for i in idx[0] if i >= 0)
Loading
Loading