From a1a530a0627e3291bdf68dd8d9fb1928ecaca9ac Mon Sep 17 00:00:00 2001 From: Steven Leggett Date: Tue, 20 Jan 2026 23:52:20 -0500 Subject: [PATCH 1/3] style: Fix black formatting issues --- sugar/memory/store.py | 66 ++++++++++++++++-------- sugar/orchestration/task_orchestrator.py | 16 ++++-- sugar/storage/issue_response_manager.py | 12 +++-- sugar/storage/task_type_manager.py | 6 ++- sugar/storage/work_queue.py | 56 +++++++++++++------- tests/test_storage.py | 6 ++- 6 files changed, 111 insertions(+), 51 deletions(-) diff --git a/sugar/memory/store.py b/sugar/memory/store.py index b8a9848..6512395 100644 --- a/sugar/memory/store.py +++ b/sugar/memory/store.py @@ -97,7 +97,8 @@ def _init_db(self): cursor = conn.cursor() # Main memory entries table - cursor.execute(""" + cursor.execute( + """ CREATE TABLE IF NOT EXISTS memory_entries ( id TEXT PRIMARY KEY, memory_type TEXT NOT NULL, @@ -111,24 +112,32 @@ def _init_db(self): access_count INTEGER DEFAULT 0, expires_at TIMESTAMP ) - """) + """ + ) # Indexes - cursor.execute(""" + cursor.execute( + """ CREATE INDEX IF NOT EXISTS idx_memory_type ON memory_entries(memory_type) - """) - cursor.execute(""" + """ + ) + cursor.execute( + """ CREATE INDEX IF NOT EXISTS idx_memory_importance ON memory_entries(importance DESC) - """) - cursor.execute(""" + """ + ) + cursor.execute( + """ CREATE INDEX IF NOT EXISTS idx_memory_created ON memory_entries(created_at DESC) - """) + """ + ) # FTS5 for keyword search (always available) - cursor.execute(""" + cursor.execute( + """ CREATE VIRTUAL TABLE IF NOT EXISTS memory_fts USING fts5( id, content, @@ -136,39 +145,48 @@ def _init_db(self): content='memory_entries', content_rowid='rowid' ) - """) + """ + ) # Triggers to keep FTS in sync - cursor.execute(""" + cursor.execute( + """ CREATE TRIGGER IF NOT EXISTS memory_ai AFTER INSERT ON memory_entries BEGIN INSERT INTO memory_fts(rowid, id, content, summary) VALUES (new.rowid, new.id, new.content, new.summary); END - """) - cursor.execute(""" + """ + ) + cursor.execute( + """ CREATE TRIGGER IF NOT EXISTS memory_ad AFTER DELETE ON memory_entries BEGIN INSERT INTO memory_fts(memory_fts, rowid, id, content, summary) VALUES ('delete', old.rowid, old.id, old.content, old.summary); END - """) - cursor.execute(""" + """ + ) + cursor.execute( + """ CREATE TRIGGER IF NOT EXISTS memory_au AFTER UPDATE ON memory_entries BEGIN INSERT INTO memory_fts(memory_fts, rowid, id, content, summary) VALUES ('delete', old.rowid, old.id, old.content, old.summary); INSERT INTO memory_fts(rowid, id, content, summary) VALUES (new.rowid, new.id, new.content, new.summary); END - """) + """ + ) # Vector storage table (if sqlite-vec available) if self._has_vec: try: - cursor.execute(f""" + cursor.execute( + f""" CREATE VIRTUAL TABLE IF NOT EXISTS memory_vectors USING vec0( id TEXT PRIMARY KEY, embedding float[{EMBEDDING_DIM}] ) - """) + """ + ) except Exception as e: logger.warning(f"Failed to create vector table: {e}") self._has_vec = False @@ -584,20 +602,24 @@ def prune_expired(self) -> int: cursor = conn.cursor() # Get IDs to delete (for vector cleanup) - cursor.execute(""" + cursor.execute( + """ SELECT id FROM memory_entries WHERE expires_at IS NOT NULL AND expires_at < datetime('now') - """) + """ + ) expired_ids = [row["id"] for row in cursor.fetchall()] if not expired_ids: return 0 # Delete from main table - cursor.execute(""" + cursor.execute( + """ DELETE FROM memory_entries WHERE expires_at IS NOT NULL AND expires_at < datetime('now') - """) + """ + ) deleted = cursor.rowcount # Clean up vectors diff --git a/sugar/orchestration/task_orchestrator.py b/sugar/orchestration/task_orchestrator.py index 0bfcf62..2a1694e 100644 --- a/sugar/orchestration/task_orchestrator.py +++ b/sugar/orchestration/task_orchestrator.py @@ -721,7 +721,9 @@ def _build_stage_prompt( """ if stage == OrchestrationStage.RESEARCH: - return base_prompt + """ + return ( + base_prompt + + """ ## Your Role You are conducting research for this task. Your goals: 1. Search for relevant best practices and documentation @@ -736,6 +738,7 @@ def _build_stage_prompt( - Technical requirements - Recommendations for implementation """ + ) elif stage == OrchestrationStage.PLANNING: research_context = "" @@ -744,7 +747,10 @@ def _build_stage_prompt( f"\n## Research Findings\n{context['research_output']}\n" ) - return base_prompt + research_context + """ + return ( + base_prompt + + research_context + + """ ## Your Role You are creating an implementation plan for this task. Your goals: 1. Break down the task into manageable subtasks @@ -769,12 +775,15 @@ def _build_stage_prompt( ## Dependencies Explain the order of execution and why. """ + ) elif stage == OrchestrationStage.REVIEW: impl_results = context.get("subtask_results", []) files_modified = context.get("files_modified", []) - return base_prompt + f""" + return ( + base_prompt + + f""" ## Implementation Complete The following subtasks have been completed: {json.dumps(impl_results, indent=2)} @@ -797,6 +806,7 @@ def _build_stage_prompt( - Recommendations for improvement - Overall assessment (pass/fail) """ + ) else: return base_prompt diff --git a/sugar/storage/issue_response_manager.py b/sugar/storage/issue_response_manager.py index 6f243c6..8ac85a3 100644 --- a/sugar/storage/issue_response_manager.py +++ b/sugar/storage/issue_response_manager.py @@ -26,7 +26,8 @@ async def initialize(self) -> None: return async with aiosqlite.connect(self.db_path) as db: - await db.execute(""" + await db.execute( + """ CREATE TABLE IF NOT EXISTS issue_responses ( id TEXT PRIMARY KEY, repo TEXT NOT NULL, @@ -40,12 +41,15 @@ async def initialize(self) -> None: was_auto_posted BOOLEAN DEFAULT 0, UNIQUE(repo, issue_number, response_type) ) - """) + """ + ) - await db.execute(""" + await db.execute( + """ CREATE INDEX IF NOT EXISTS idx_issue_responses_repo_number ON issue_responses (repo, issue_number) - """) + """ + ) await db.commit() diff --git a/sugar/storage/task_type_manager.py b/sugar/storage/task_type_manager.py index 5b255cf..d1ac8c9 100644 --- a/sugar/storage/task_type_manager.py +++ b/sugar/storage/task_type_manager.py @@ -35,7 +35,8 @@ async def initialize(self): if not table_exists: # Create task_types table - await db.execute(""" + await db.execute( + """ CREATE TABLE task_types ( id TEXT PRIMARY KEY, name TEXT NOT NULL, @@ -56,7 +57,8 @@ async def initialize(self): created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ) - """) + """ + ) # Populate with default types default_types = self._get_default_task_types() diff --git a/sugar/storage/work_queue.py b/sugar/storage/work_queue.py index 0ef6863..80b159b 100644 --- a/sugar/storage/work_queue.py +++ b/sugar/storage/work_queue.py @@ -28,7 +28,8 @@ async def initialize(self): return async with aiosqlite.connect(self.db_path) as db: - await db.execute(""" + await db.execute( + """ CREATE TABLE IF NOT EXISTS work_items ( id TEXT PRIMARY KEY, type TEXT NOT NULL, @@ -51,17 +52,22 @@ async def initialize(self): total_elapsed_time REAL DEFAULT 0.0, commit_sha TEXT ) - """) + """ + ) - await db.execute(""" + await db.execute( + """ CREATE INDEX IF NOT EXISTS idx_work_items_priority_status ON work_items (priority ASC, status, created_at) - """) + """ + ) - await db.execute(""" + await db.execute( + """ CREATE INDEX IF NOT EXISTS idx_work_items_status ON work_items (status) - """) + """ + ) # Migrate existing databases to add timing columns and task types table await self._migrate_timing_columns(db) @@ -120,7 +126,8 @@ async def _migrate_task_types_table(self, db): if not table_exists: # Create task_types table - await db.execute(""" + await db.execute( + """ CREATE TABLE task_types ( id TEXT PRIMARY KEY, name TEXT NOT NULL, @@ -133,7 +140,8 @@ async def _migrate_task_types_table(self, db): created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ) - """) + """ + ) # Insert default task types default_types = [ @@ -261,10 +269,12 @@ async def _migrate_orchestration_columns(self, db): logger.info("Added assigned_agent column to existing database") # Create index for parent_task_id queries - await db.execute(""" + await db.execute( + """ CREATE INDEX IF NOT EXISTS idx_work_items_parent_task_id ON work_items (parent_task_id) - """) + """ + ) except Exception as e: logger.warning(f"Orchestration migration warning (non-critical): {e}") @@ -446,12 +456,14 @@ async def get_next_work(self) -> Optional[Dict[str, Any]]: db.row_factory = aiosqlite.Row # Get highest priority pending work item (exclude hold status) - cursor = await db.execute(""" + cursor = await db.execute( + """ SELECT * FROM work_items WHERE status = 'pending' ORDER BY priority ASC, created_at ASC LIMIT 1 - """) + """ + ) row = await cursor.fetchone() @@ -694,11 +706,13 @@ async def get_stats(self) -> Dict[str, int]: stats = {} # Count by status - cursor = await db.execute(""" + cursor = await db.execute( + """ SELECT status, COUNT(*) as count FROM work_items GROUP BY status - """) + """ + ) rows = await cursor.fetchall() for row in rows: @@ -712,10 +726,12 @@ async def get_stats(self) -> Dict[str, int]: stats["total"] = sum(stats.values()) # Recent activity (last 24 hours) - cursor = await db.execute(""" + cursor = await db.execute( + """ SELECT COUNT(*) FROM work_items WHERE created_at > datetime('now', '-1 day') - """) + """ + ) stats["recent_24h"] = (await cursor.fetchone())[0] return stats @@ -723,11 +739,15 @@ async def get_stats(self) -> Dict[str, int]: async def cleanup_old_items(self, days_old: int = 30): """Clean up old completed/failed items""" async with aiosqlite.connect(self.db_path) as db: - cursor = await db.execute(""" + cursor = await db.execute( + """ DELETE FROM work_items WHERE status IN ('completed', 'failed') AND created_at < datetime('now', '-{} days') - """.format(days_old)) + """.format( + days_old + ) + ) deleted_count = cursor.rowcount await db.commit() diff --git a/tests/test_storage.py b/tests/test_storage.py index 1feaa18..8b6d701 100644 --- a/tests/test_storage.py +++ b/tests/test_storage.py @@ -470,7 +470,8 @@ async def test_migration_adds_timing_columns(self, temp_dir): import aiosqlite async with aiosqlite.connect(str(db_path)) as db: - await db.execute(""" + await db.execute( + """ CREATE TABLE work_items ( id TEXT PRIMARY KEY, type TEXT NOT NULL, @@ -489,7 +490,8 @@ async def test_migration_adds_timing_columns(self, temp_dir): result TEXT, error_message TEXT ) - """) + """ + ) await db.commit() # Initialize WorkQueue (should trigger migration) From 6c37975a354bbac8dfadd5571f3552cc11089a65 Mon Sep 17 00:00:00 2001 From: Steven Leggett Date: Wed, 21 Jan 2026 00:00:01 -0500 Subject: [PATCH 2/3] ci: Pin black version to ensure consistent formatting --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 7f6ee41..36d4434 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -37,7 +37,7 @@ jobs: run: | python -m pip install --upgrade pip pip install -e . - pip install pytest pytest-asyncio pytest-cov black flake8 mypy + pip install pytest pytest-asyncio pytest-cov 'black>=24.0.0,<26.0.0' flake8 mypy - name: Lint with flake8 run: | From ca7025c63aed1c436b2cdbf33d6776e50e2731ee Mon Sep 17 00:00:00 2001 From: Steven Leggett Date: Wed, 21 Jan 2026 00:06:29 -0500 Subject: [PATCH 3/3] fix: Update OpenCode setup tests to use --config flag for Windows compatibility --- tests/test_opencode_integration.py | 105 ++++++++++++++--------------- 1 file changed, 50 insertions(+), 55 deletions(-) diff --git a/tests/test_opencode_integration.py b/tests/test_opencode_integration.py index 0c84575..a08c1a0 100644 --- a/tests/test_opencode_integration.py +++ b/tests/test_opencode_integration.py @@ -924,14 +924,13 @@ def test_setup_finds_config_file(self, tmp_path): from sugar.main import cli # Create a mock OpenCode config - config_dir = tmp_path / ".config" / "opencode" - config_dir.mkdir(parents=True) - config_file = config_dir / "opencode.json" + config_file = tmp_path / "opencode.json" config_file.write_text('{"$schema": "https://opencode.ai/config.json"}') runner = CliRunner() - with patch.dict(os.environ, {"HOME": str(tmp_path)}): - result = runner.invoke(cli, ["opencode", "setup", "--dry-run"]) + result = runner.invoke( + cli, ["opencode", "setup", "--dry-run", "--config", str(config_file)] + ) assert result.exit_code == 0 assert "Found config" in result.output @@ -944,15 +943,14 @@ def test_setup_dry_run_no_changes(self, tmp_path): from sugar.main import cli # Create a mock OpenCode config - config_dir = tmp_path / ".config" / "opencode" - config_dir.mkdir(parents=True) - config_file = config_dir / "opencode.json" + config_file = tmp_path / "opencode.json" original_content = '{"$schema": "https://opencode.ai/config.json"}' config_file.write_text(original_content) runner = CliRunner() - with patch.dict(os.environ, {"HOME": str(tmp_path)}): - result = runner.invoke(cli, ["opencode", "setup", "--dry-run"]) + result = runner.invoke( + cli, ["opencode", "setup", "--dry-run", "--config", str(config_file)] + ) assert result.exit_code == 0 assert "Dry run" in result.output @@ -966,14 +964,13 @@ def test_setup_adds_mcp_servers(self, tmp_path): import json # Create a mock OpenCode config - config_dir = tmp_path / ".config" / "opencode" - config_dir.mkdir(parents=True) - config_file = config_dir / "opencode.json" + config_file = tmp_path / "opencode.json" config_file.write_text('{"$schema": "https://opencode.ai/config.json"}') runner = CliRunner() - with patch.dict(os.environ, {"HOME": str(tmp_path)}): - result = runner.invoke(cli, ["opencode", "setup", "--yes"]) + result = runner.invoke( + cli, ["opencode", "setup", "--yes", "--config", str(config_file)] + ) assert result.exit_code == 0 assert "Config updated" in result.output @@ -996,9 +993,7 @@ def test_setup_preserves_existing_config(self, tmp_path): import json # Create a mock OpenCode config with existing settings - config_dir = tmp_path / ".config" / "opencode" - config_dir.mkdir(parents=True) - config_file = config_dir / "opencode.json" + config_file = tmp_path / "opencode.json" config_file.write_text( json.dumps( { @@ -1010,8 +1005,9 @@ def test_setup_preserves_existing_config(self, tmp_path): ) runner = CliRunner() - with patch.dict(os.environ, {"HOME": str(tmp_path)}): - result = runner.invoke(cli, ["opencode", "setup", "--yes"]) + result = runner.invoke( + cli, ["opencode", "setup", "--yes", "--config", str(config_file)] + ) assert result.exit_code == 0 @@ -1029,21 +1025,22 @@ def test_setup_idempotent(self, tmp_path): import json # Create a mock OpenCode config - config_dir = tmp_path / ".config" / "opencode" - config_dir.mkdir(parents=True) - config_file = config_dir / "opencode.json" + config_file = tmp_path / "opencode.json" config_file.write_text('{"$schema": "https://opencode.ai/config.json"}') runner = CliRunner() - with patch.dict(os.environ, {"HOME": str(tmp_path)}): - # Run setup first time - result1 = runner.invoke(cli, ["opencode", "setup", "--yes"]) - assert result1.exit_code == 0 + # Run setup first time + result1 = runner.invoke( + cli, ["opencode", "setup", "--yes", "--config", str(config_file)] + ) + assert result1.exit_code == 0 - # Run setup second time - result2 = runner.invoke(cli, ["opencode", "setup", "--yes"]) - assert result2.exit_code == 0 - assert "already configured" in result2.output + # Run setup second time + result2 = runner.invoke( + cli, ["opencode", "setup", "--yes", "--config", str(config_file)] + ) + assert result2.exit_code == 0 + assert "already configured" in result2.output def test_setup_no_config_file_error(self, tmp_path): """Test error when no OpenCode config file exists""" @@ -1051,9 +1048,11 @@ def test_setup_no_config_file_error(self, tmp_path): from sugar.main import cli runner = CliRunner() - # Use empty temp dir with no config files - with patch.dict(os.environ, {"HOME": str(tmp_path)}): - result = runner.invoke(cli, ["opencode", "setup"]) + # Use non-existent config file path + result = runner.invoke( + cli, + ["opencode", "setup", "--config", str(tmp_path / "nonexistent.json")], + ) assert result.exit_code == 1 assert "Could not find OpenCode config file" in result.output @@ -1065,14 +1064,14 @@ def test_setup_no_memory_flag(self, tmp_path): import json # Create a mock OpenCode config - config_dir = tmp_path / ".config" / "opencode" - config_dir.mkdir(parents=True) - config_file = config_dir / "opencode.json" + config_file = tmp_path / "opencode.json" config_file.write_text('{"$schema": "https://opencode.ai/config.json"}') runner = CliRunner() - with patch.dict(os.environ, {"HOME": str(tmp_path)}): - result = runner.invoke(cli, ["opencode", "setup", "--yes", "--no-memory"]) + result = runner.invoke( + cli, + ["opencode", "setup", "--yes", "--no-memory", "--config", str(config_file)], + ) assert result.exit_code == 0 @@ -1087,14 +1086,14 @@ def test_setup_no_tasks_flag(self, tmp_path): import json # Create a mock OpenCode config - config_dir = tmp_path / ".config" / "opencode" - config_dir.mkdir(parents=True) - config_file = config_dir / "opencode.json" + config_file = tmp_path / "opencode.json" config_file.write_text('{"$schema": "https://opencode.ai/config.json"}') runner = CliRunner() - with patch.dict(os.environ, {"HOME": str(tmp_path)}): - result = runner.invoke(cli, ["opencode", "setup", "--yes", "--no-tasks"]) + result = runner.invoke( + cli, + ["opencode", "setup", "--yes", "--no-tasks", "--config", str(config_file)], + ) assert result.exit_code == 0 @@ -1108,14 +1107,11 @@ def test_setup_malformed_json_error(self, tmp_path): from sugar.main import cli # Create a malformed config - config_dir = tmp_path / ".config" / "opencode" - config_dir.mkdir(parents=True) - config_file = config_dir / "opencode.json" + config_file = tmp_path / "opencode.json" config_file.write_text("{ invalid json }") runner = CliRunner() - with patch.dict(os.environ, {"HOME": str(tmp_path)}): - result = runner.invoke(cli, ["opencode", "setup"]) + result = runner.invoke(cli, ["opencode", "setup", "--config", str(config_file)]) assert result.exit_code == 1 assert "Failed to parse config file" in result.output @@ -1127,19 +1123,18 @@ def test_setup_parses_jsonc(self, tmp_path): import json # Create a JSONC config with comments - config_dir = tmp_path / ".config" / "opencode" - config_dir.mkdir(parents=True) - config_file = config_dir / "opencode.json" + config_file = tmp_path / "opencode.json" jsonc_content = """{ // This is a comment "$schema": "https://opencode.ai/config.json", - "plugin": ["test"], // trailing comment + "plugin": ["test"] // trailing comment }""" config_file.write_text(jsonc_content) runner = CliRunner() - with patch.dict(os.environ, {"HOME": str(tmp_path)}): - result = runner.invoke(cli, ["opencode", "setup", "--yes"]) + result = runner.invoke( + cli, ["opencode", "setup", "--yes", "--config", str(config_file)] + ) assert result.exit_code == 0 assert "Config updated" in result.output