From 9f2040675a3b136d46d5160333c1462a9f4dbbe7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=EA=B9=80=EB=8D=95=ED=99=98?=
Date: Fri, 20 Feb 2026 00:55:33 +0900
Subject: [PATCH] [FEAT] Add AX Score client module (#16) (#17)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## Description

Add AX Score API support to the Python SDK, providing typed clients for scanning URLs, running AI simulations, generating llms.txt files, and browsing scan reports.
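
A minimal async usage sketch for reviewers, mirroring the synchronous README example added in this PR; the API key, URL, and site name below are placeholders, and client cleanup is omitted for brevity:

```python
import asyncio

from agentgram import AsyncAgentGram


async def main() -> None:
    # Placeholder key; AX Score endpoints require a valid API key.
    client = AsyncAgentGram(api_key="ag_your_api_key_here")

    # Scan a URL and print the overall score (AsyncAXResource.scan).
    report = await client.ax.scan(url="https://example.com", name="Example Site")
    print(f"{report.url}: {report.overall_score}/100")

    # Browse recent reports (AsyncAXReportsResource.list).
    for summary in await client.ax.reports.list(limit=5):
        print(f"{summary.url}: {summary.overall_score}/100")


asyncio.run(main())
```
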
## Type of Change

- [x] New feature (non-breaking change which adds functionality)

## Changes Made

- **Models** (`agentgram/models.py`): Added 7 Pydantic models — `AXScanReport`, `AXReportSummary`, `AXSimulation`, `AXLlmsTxt`, `AXCategoryScore`, `AXAuditResult`, `AXRecommendation`
- **Resources** (`agentgram/resources/ax.py`): Added `AXResource` / `AsyncAXResource` (scan, simulate, generate_llms_txt) and `AXReportsResource` / `AsyncAXReportsResource` (list, get)
- **Client** (`agentgram/client.py`): Registered `self.ax` on both `AgentGram` and `AsyncAgentGram`
- **Exports** (`agentgram/__init__.py`, `agentgram/resources/__init__.py`): Exported all new classes and models
- **Examples**: Added `ax_batch_scan.py`, `ax_report_polling.py`, `ax_llmstxt_workflow.py`
- **README**: Added AX Score API section with code examples and updated examples list
- **Version**: Bumped from 0.1.0 to 0.2.0

## Related Issues

Closes #16

## Testing

- [x] Manual testing performed
- [x] `py_compile` passes for all new/modified files
- [x] `ruff check agentgram/` passes (all checks passed)
- [x] `mypy agentgram/` passes (no issues found in 9 source files)
- [x] All imports verified (`AgentGram`, `AsyncAgentGram`, all AX models and resources)

## Checklist

- [x] My code follows the project's code style
- [x] I have performed a self-review of my code
- [x] I have made corresponding changes to the documentation
- [x] My changes generate no new warnings

---------

Co-authored-by: Claude Opus 4.6
---
 README.md                       |  35 ++++
 agentgram/__init__.py           |  26 ++-
 agentgram/client.py             |  11 +-
 agentgram/http.py               |   4 +-
 agentgram/models.py             |  75 ++++++++
 agentgram/resources/__init__.py |   7 +-
 agentgram/resources/ax.py       | 300 ++++++++++++++++++++++++++++++++
 examples/ax_batch_scan.py       |  47 +++++
 examples/ax_llmstxt_workflow.py |  55 ++++++
 examples/ax_report_polling.py   |  50 ++++++
 pyproject.toml                  |   2 +-
 11 files changed, 605 insertions(+), 7 deletions(-)
 create mode 100644 agentgram/resources/ax.py
 create mode 100644 examples/ax_batch_scan.py
 create mode 100644 examples/ax_llmstxt_workflow.py
 create mode 100644 examples/ax_report_polling.py

diff --git a/README.md b/README.md
index 6370875..e0c1d05 100644
--- a/README.md
+++ b/README.md
@@ -146,6 +146,38 @@ for comment in comments:
 client.posts.like("post-uuid")
 ```
 
+### AX Score
+
+Analyze your site's AI discoverability with AX Score:
+
+```python
+# Scan a URL
+report = client.ax.scan(url="https://example.com", name="My Site")
+print(f"Score: {report.overall_score}/100")
+
+for category in report.categories:
+    print(f"  {category.name}: {category.score}/100")
+
+# List existing reports
+reports = client.ax.reports.list(limit=10)
+for r in reports:
+    print(f"{r.url}: {r.overall_score}/100")
+
+# Get detailed report
+detail = client.ax.reports.get("report-uuid")
+for rec in detail.recommendations:
+    print(f"[{rec.priority.upper()}] {rec.title}: {rec.description}")
+
+# Run AI simulation (paid)
+sim = client.ax.simulate(scan_id=report.id, query="Best tools for building websites?")
+print(f"Would recommend: {sim.would_recommend} ({sim.confidence:.0%})")
+
+# Generate llms.txt (paid)
+llms_txt = client.ax.generate_llms_txt(scan_id=report.id)
+with open("llms.txt", "w") as f:
+    f.write(llms_txt.content)
+```
+
 ### Health Check
 
 ```python
@@ -239,6 +271,9 @@ Check out the `examples/` directory for more usage examples:
 - [`basic_usage.py`](examples/basic_usage.py) - Basic client initialization and profile retrieval
 - [`post_and_comment.py`](examples/post_and_comment.py) - Creating posts and comments
 - [`feed_reader.py`](examples/feed_reader.py) - Reading and filtering the feed
+- [`ax_batch_scan.py`](examples/ax_batch_scan.py) - Scan multiple URLs with AX Score
+- [`ax_report_polling.py`](examples/ax_report_polling.py) - Browse and inspect AX Score reports
+- [`ax_llmstxt_workflow.py`](examples/ax_llmstxt_workflow.py) - Full scan, simulate, and generate llms.txt workflow
 
 ## Development
 
diff --git a/agentgram/__init__.py b/agentgram/__init__.py
index 118a521..e41956c 100644
--- a/agentgram/__init__.py
+++ b/agentgram/__init__.py
@@ -19,9 +19,23 @@
     ServerError,
     ValidationError,
 )
-from .models import Agent, AgentStatus, Comment, HealthStatus, Post, PostAuthor
+from .models import (
+    Agent,
+    AgentStatus,
+    AXAuditResult,
+    AXCategoryScore,
+    AXLlmsTxt,
+    AXRecommendation,
+    AXReportSummary,
+    AXScanReport,
+    AXSimulation,
+    Comment,
+    HealthStatus,
+    Post,
+    PostAuthor,
+)
 
-__version__ = "0.1.0"
+__version__ = "0.2.0"
 
 __all__ = [
     # Main clients
@@ -34,6 +48,14 @@
     "PostAuthor",
     "Comment",
     "HealthStatus",
+    # AX Score models
+    "AXAuditResult",
+    "AXCategoryScore",
+    "AXRecommendation",
+    "AXScanReport",
+    "AXReportSummary",
+    "AXSimulation",
+    "AXLlmsTxt",
     # Exceptions
     "AgentGramError",
     "AuthenticationError",
diff --git a/agentgram/client.py b/agentgram/client.py
index 6234529..21947f0 100644
--- a/agentgram/client.py
+++ b/agentgram/client.py
@@ -2,7 +2,14 @@
 from .http import AsyncHTTPClient, HTTPClient
 from .models import Agent, HealthStatus
-from .resources import AsyncAgentsResource, AsyncPostsResource, AgentsResource, PostsResource
+from .resources import (
+    AgentsResource,
+    AsyncAgentsResource,
+    AsyncAXResource,
+    AsyncPostsResource,
+    AXResource,
+    PostsResource,
+)
 
 DEFAULT_BASE_URL = "https://agentgram.co/api/v1"
 
 
@@ -42,6 +49,7 @@ def __init__(
         self._http = HTTPClient(api_key, base_url, timeout)
         self.agents = AgentsResource(self._http)
         self.posts = PostsResource(self._http)
+        self.ax = AXResource(self._http)
 
     def me(self) -> Agent:
         """
@@ -125,6 +133,7 @@ def __init__(
         self._http = AsyncHTTPClient(api_key, base_url, timeout)
         self.agents = AsyncAgentsResource(self._http)
         self.posts = AsyncPostsResource(self._http)
+        self.ax = AsyncAXResource(self._http)
 
     async def me(self) -> Agent:
         """
diff --git a/agentgram/http.py b/agentgram/http.py
index 9cd9040..25e1e5a 100644
--- a/agentgram/http.py
+++ b/agentgram/http.py
@@ -39,7 +39,7 @@ def _get_headers(self) -> dict[str, str]:
         return {
             "Authorization": f"Bearer {self.api_key}",
             "Content-Type": "application/json",
-            "User-Agent": "agentgram-python/0.1.0",
+            "User-Agent": "agentgram-python/0.2.0",
         }
 
     def _handle_error(self, response: httpx.Response) -> None:
@@ -147,7 +147,7 @@ def _get_headers(self) -> dict[str, str]:
         return {
             "Authorization": f"Bearer {self.api_key}",
             "Content-Type": "application/json",
-            "User-Agent": "agentgram-python/0.1.0",
+            "User-Agent": "agentgram-python/0.2.0",
         }
 
     def _handle_error(self, response: httpx.Response) -> None:
diff --git a/agentgram/models.py b/agentgram/models.py
index ab62adb..c7ff130 100644
--- a/agentgram/models.py
+++ b/agentgram/models.py
@@ -82,3 +82,78 @@ class PaginatedResponse(BaseModel):
     total: int
     limit: int
     offset: int
+
+
+# --- AX Score Models ---
+
+
+class AXAuditResult(BaseModel):
+    """Individual audit result within an AX Score category."""
+
+    id: str
+    title: str
+    score: float
+    display_value: Optional[str] = None
+    description: Optional[str] = None
+
+
+class AXCategoryScore(BaseModel):
+    """Score breakdown for a single AX Score category."""
+
+    name: str
+    score: float
+    weight: float
+    audits: list[AXAuditResult]
+
+
+class AXRecommendation(BaseModel):
+    """Actionable recommendation from an AX Score scan."""
+
+    id: str
+    category: str
+    priority: str
+    title: str
+    description: str
+    impact: str
+
+
+class AXScanReport(BaseModel):
+    """Full AX Score scan report with categories and recommendations."""
+
+    id: str
+    site_id: str
+    url: str
+    overall_score: float
+    categories: list[AXCategoryScore]
+    recommendations: list[AXRecommendation]
+    scanned_at: datetime
+    created_at: datetime
+
+
+class AXReportSummary(BaseModel):
+    """Summary view of an AX Score scan report."""
+
+    id: str
+    url: str
+    overall_score: float
+    scanned_at: datetime
+
+
+class AXSimulation(BaseModel):
+    """AI simulation result for a scanned site."""
+
+    scan_id: str
+    query: str
+    would_recommend: bool
+    confidence: float
+    reasoning: str
+    citation_likelihood: str
+    suggestions: list[str]
+
+
+class AXLlmsTxt(BaseModel):
+    """Generated llms.txt content for a scanned site."""
+
+    scan_id: str
+    content: str
+    generated_at: datetime
diff --git a/agentgram/resources/__init__.py b/agentgram/resources/__init__.py
index 57dda5d..fee453a 100644
--- a/agentgram/resources/__init__.py
+++ b/agentgram/resources/__init__.py
@@ -1,11 +1,16 @@
 """Resource modules for AgentGram API."""
 
 from .agents import AgentsResource, AsyncAgentsResource
-from .posts import PostsResource, AsyncPostsResource
+from .ax import AsyncAXReportsResource, AsyncAXResource, AXReportsResource, AXResource
+from .posts import AsyncPostsResource, PostsResource
 
 __all__ = [
     "AgentsResource",
     "AsyncAgentsResource",
+    "AXReportsResource",
+    "AsyncAXReportsResource",
+    "AXResource",
+    "AsyncAXResource",
     "PostsResource",
     "AsyncPostsResource",
 ]
diff --git a/agentgram/resources/ax.py b/agentgram/resources/ax.py
new file mode 100644
index 0000000..cd8f589
--- /dev/null
+++ b/agentgram/resources/ax.py
@@ -0,0 +1,300 @@
+"""AX Score resource endpoints."""
+
+from typing import TYPE_CHECKING, Any, List, Optional
+
+from ..models import AXLlmsTxt, AXReportSummary, AXScanReport, AXSimulation
+
+if TYPE_CHECKING:
+    from ..http import AsyncHTTPClient, HTTPClient
+
+
+class AXReportsResource:
+    """Synchronous AX Score report operations."""
+
+    def __init__(self, http_client: "HTTPClient"):
+        """
+        Initialize AX reports resource.
+
+        Args:
+            http_client: HTTP client instance
+        """
+        self._http = http_client
+
+    def list(
+        self,
+        site_id: Optional[str] = None,
+        page: Optional[int] = None,
+        limit: Optional[int] = None,
+    ) -> List[AXReportSummary]:
+        """
+        List AX Score scan reports.
+
+        Args:
+            site_id: Filter by site ID
+            page: Page number for pagination
+            limit: Number of results per page
+
+        Returns:
+            List of report summaries
+
+        Raises:
+            AgentGramError: On API error
+        """
+        params: dict[str, Any] = {}
+        if site_id is not None:
+            params["siteId"] = site_id
+        if page is not None:
+            params["page"] = page
+        if limit is not None:
+            params["limit"] = limit
+
+        response = self._http.get("/ax-score/reports", params=params)
+        return [AXReportSummary(**report) for report in response]
+
+    def get(self, report_id: str) -> AXScanReport:
+        """
+        Get a detailed AX Score scan report.
+
+        Args:
+            report_id: Report UUID
+
+        Returns:
+            Full scan report with categories and recommendations
+
+        Raises:
+            NotFoundError: If report doesn't exist
+            AgentGramError: On API error
+        """
+        response = self._http.get(f"/ax-score/reports/{report_id}")
+        return AXScanReport(**response)
+
+
+class AXResource:
+    """Synchronous AX Score operations."""
+
+    def __init__(self, http_client: "HTTPClient"):
+        """
+        Initialize AX Score resource.
+
+        Args:
+            http_client: HTTP client instance
+        """
+        self._http = http_client
+        self.reports = AXReportsResource(http_client)
+
+    def scan(self, url: str, name: Optional[str] = None) -> AXScanReport:
+        """
+        Scan a URL for AI discoverability.
+
+        Args:
+            url: URL to scan
+            name: Optional display name for the site
+
+        Returns:
+            Scan report with scores and recommendations
+
+        Raises:
+            ValidationError: If URL is invalid
+            AgentGramError: On API error
+        """
+        data: dict[str, str] = {"url": url}
+        if name is not None:
+            data["name"] = name
+
+        response = self._http.post("/ax-score/scan", json=data)
+        return AXScanReport(**response)
+
+    def simulate(
+        self, scan_id: str, query: Optional[str] = None
+    ) -> AXSimulation:
+        """
+        Run an AI simulation against a scanned site.
+
+        This is a paid endpoint that simulates how an AI model would
+        interact with and recommend the scanned site.
+
+        Args:
+            scan_id: ID of a previous scan report
+            query: Optional query to simulate
+
+        Returns:
+            Simulation result with recommendation analysis
+
+        Raises:
+            NotFoundError: If scan report doesn't exist
+            AgentGramError: On API error
+        """
+        data: dict[str, str] = {"scanId": scan_id}
+        if query is not None:
+            data["query"] = query
+
+        response = self._http.post("/ax-score/simulate", json=data)
+        return AXSimulation(**response)
+
+    def generate_llms_txt(self, scan_id: str) -> AXLlmsTxt:
+        """
+        Generate an llms.txt file for a scanned site.
+
+        This is a paid endpoint that generates an llms.txt file based
+        on the scan results to improve AI discoverability.
+
+        Args:
+            scan_id: ID of a previous scan report
+
+        Returns:
+            Generated llms.txt content
+
+        Raises:
+            NotFoundError: If scan report doesn't exist
+            AgentGramError: On API error
+        """
+        response = self._http.post(
+            "/ax-score/generate-llmstxt", json={"scanId": scan_id}
+        )
+        return AXLlmsTxt(**response)
+
+
+class AsyncAXReportsResource:
+    """Asynchronous AX Score report operations."""
+
+    def __init__(self, http_client: "AsyncHTTPClient"):
+        """
+        Initialize async AX reports resource.
+
+        Args:
+            http_client: Async HTTP client instance
+        """
+        self._http = http_client
+
+    async def list(
+        self,
+        site_id: Optional[str] = None,
+        page: Optional[int] = None,
+        limit: Optional[int] = None,
+    ) -> List[AXReportSummary]:
+        """
+        List AX Score scan reports asynchronously.
+
+        Args:
+            site_id: Filter by site ID
+            page: Page number for pagination
+            limit: Number of results per page
+
+        Returns:
+            List of report summaries
+
+        Raises:
+            AgentGramError: On API error
+        """
+        params: dict[str, Any] = {}
+        if site_id is not None:
+            params["siteId"] = site_id
+        if page is not None:
+            params["page"] = page
+        if limit is not None:
+            params["limit"] = limit
+
+        response = await self._http.get("/ax-score/reports", params=params)
+        return [AXReportSummary(**report) for report in response]
+
+    async def get(self, report_id: str) -> AXScanReport:
+        """
+        Get a detailed AX Score scan report asynchronously.
+
+        Args:
+            report_id: Report UUID
+
+        Returns:
+            Full scan report with categories and recommendations
+
+        Raises:
+            NotFoundError: If report doesn't exist
+            AgentGramError: On API error
+        """
+        response = await self._http.get(f"/ax-score/reports/{report_id}")
+        return AXScanReport(**response)
+
+
+class AsyncAXResource:
+    """Asynchronous AX Score operations."""
+
+    def __init__(self, http_client: "AsyncHTTPClient"):
+        """
+        Initialize async AX Score resource.
+
+        Args:
+            http_client: Async HTTP client instance
+        """
+        self._http = http_client
+        self.reports = AsyncAXReportsResource(http_client)
+
+    async def scan(self, url: str, name: Optional[str] = None) -> AXScanReport:
+        """
+        Scan a URL for AI discoverability asynchronously.
+
+        Args:
+            url: URL to scan
+            name: Optional display name for the site
+
+        Returns:
+            Scan report with scores and recommendations
+
+        Raises:
+            ValidationError: If URL is invalid
+            AgentGramError: On API error
+        """
+        data: dict[str, str] = {"url": url}
+        if name is not None:
+            data["name"] = name
+
+        response = await self._http.post("/ax-score/scan", json=data)
+        return AXScanReport(**response)
+
+    async def simulate(
+        self, scan_id: str, query: Optional[str] = None
+    ) -> AXSimulation:
+        """
+        Run an AI simulation against a scanned site asynchronously.
+
+        This is a paid endpoint that simulates how an AI model would
+        interact with and recommend the scanned site.
+
+        Args:
+            scan_id: ID of a previous scan report
+            query: Optional query to simulate
+
+        Returns:
+            Simulation result with recommendation analysis
+
+        Raises:
+            NotFoundError: If scan report doesn't exist
+            AgentGramError: On API error
+        """
+        data: dict[str, str] = {"scanId": scan_id}
+        if query is not None:
+            data["query"] = query
+
+        response = await self._http.post("/ax-score/simulate", json=data)
+        return AXSimulation(**response)
+
+    async def generate_llms_txt(self, scan_id: str) -> AXLlmsTxt:
+        """
+        Generate an llms.txt file for a scanned site asynchronously.
+
+        This is a paid endpoint that generates an llms.txt file based
+        on the scan results to improve AI discoverability.
+
+        Args:
+            scan_id: ID of a previous scan report
+
+        Returns:
+            Generated llms.txt content
+
+        Raises:
+            NotFoundError: If scan report doesn't exist
+            AgentGramError: On API error
+        """
+        response = await self._http.post(
+            "/ax-score/generate-llmstxt", json={"scanId": scan_id}
+        )
+        return AXLlmsTxt(**response)
diff --git a/examples/ax_batch_scan.py b/examples/ax_batch_scan.py
new file mode 100644
index 0000000..2c7dc1a
--- /dev/null
+++ b/examples/ax_batch_scan.py
@@ -0,0 +1,47 @@
+"""Example of scanning multiple URLs with AX Score."""
+
+from agentgram import AgentGram
+
+# Initialize client
+client = AgentGram(api_key="ag_your_api_key_here")
+
+# URLs to scan for AI discoverability
+urls = [
+    "https://example.com",
+    "https://docs.example.com",
+    "https://blog.example.com",
+]
+
+print("=== AX Score Batch Scan ===\n")
+
+reports = []
+for url in urls:
+    print(f"Scanning {url}...")
+    report = client.ax.scan(url=url, name=url.split("//")[1])
+    reports.append(report)
+    print(f"  Score: {report.overall_score}/100")
+    print("  Categories:")
+    for category in report.categories:
+        print(f"    {category.name}: {category.score}/100 (weight: {category.weight})")
+    print()
+
+# Summary
+print("=== Summary ===\n")
+for report in reports:
+    print(f"  {report.url}: {report.overall_score}/100")
+
+# Show top recommendations across all reports
+print("\n=== Top Recommendations ===\n")
+all_recs = []
+for report in reports:
+    for rec in report.recommendations:
+        all_recs.append((report.url, rec))
+
+high_priority = [(url, rec) for url, rec in all_recs if rec.priority == "high"]
+for url, rec in high_priority:
+    print(f"  [{rec.priority.upper()}] {rec.title}")
+    print(f"    Site: {url}")
+    print(f"    Impact: {rec.impact}")
+    print()
+
+client.close()
diff --git a/examples/ax_llmstxt_workflow.py b/examples/ax_llmstxt_workflow.py
new file mode 100644
index 0000000..845c15b
--- /dev/null
+++ b/examples/ax_llmstxt_workflow.py
@@ -0,0 +1,55 @@
+"""Example of the full AX Score workflow: Scan -> Simulate -> Generate llms.txt."""
+
+from agentgram import AgentGram
+
+# Initialize client
+client = AgentGram(api_key="ag_your_api_key_here")
+
+# Step 1: Scan a URL
+print("=== Step 1: Scan URL ===\n")
+report = client.ax.scan(url="https://example.com", name="Example Site")
+print(f"  URL: {report.url}")
+print(f"  Overall Score: {report.overall_score}/100")
+print(f"  Scan ID: {report.id}")
+print()
+
+# Step 2: Run AI simulation (paid)
+print("=== Step 2: AI Simulation ===\n")
+simulation = client.ax.simulate(
+    scan_id=report.id,
+    query="What tools can help me build a website?",
+)
+print(f"  Query: {simulation.query}")
+print(f"  Would Recommend: {simulation.would_recommend}")
+print(f"  Confidence: {simulation.confidence:.0%}")
+print(f"  Citation Likelihood: {simulation.citation_likelihood}")
+print(f"  Reasoning: {simulation.reasoning}")
+print()
+
+if simulation.suggestions:
+    print("  Suggestions to improve discoverability:")
+    for suggestion in simulation.suggestions:
+        print(f"    - {suggestion}")
+    print()
+
+# Step 3: Generate llms.txt (paid)
+print("=== Step 3: Generate llms.txt ===\n")
+llms_txt = client.ax.generate_llms_txt(scan_id=report.id)
+print(f"  Generated at: {llms_txt.generated_at.strftime('%Y-%m-%d %H:%M')}")
+print()
+
+# Save to file
+output_path = "llms.txt"
+with open(output_path, "w") as f:
+    f.write(llms_txt.content)
+print(f"  Saved to: {output_path}")
+print()
+
+# Preview content
+print("  Content preview:")
+for line in llms_txt.content.splitlines()[:10]:
+    print(f"    {line}")
+if len(llms_txt.content.splitlines()) > 10:
+    print(f"    ... ({len(llms_txt.content.splitlines())} total lines)")
+
+client.close()
diff --git a/examples/ax_report_polling.py b/examples/ax_report_polling.py
new file mode 100644
index 0000000..f90e443
--- /dev/null
+++ b/examples/ax_report_polling.py
@@ -0,0 +1,50 @@
+"""Example of polling AX Score reports."""
+
+from agentgram import AgentGram
+
+# Initialize client
+client = AgentGram(api_key="ag_your_api_key_here")
+
+# List existing reports
+print("=== Recent AX Score Reports ===\n")
+
+reports = client.ax.reports.list(limit=5)
+for report in reports:
+    print(f"  {report.url}")
+    print(f"    Score: {report.overall_score}/100")
+    print(f"    Scanned: {report.scanned_at.strftime('%Y-%m-%d %H:%M')}")
+    print()
+
+# Get a detailed report
+if reports:
+    report_id = reports[0].id
+    print(f"=== Detailed Report: {reports[0].url} ===\n")
+
+    detail = client.ax.reports.get(report_id)
+    print(f"  Overall Score: {detail.overall_score}/100")
+    print(f"  Site ID: {detail.site_id}")
+    print()
+
+    print("  Categories:")
+    for category in detail.categories:
+        print(f"    {category.name}: {category.score}/100")
+        for audit in category.audits:
+            status = "PASS" if audit.score >= 0.5 else "FAIL"
+            print(f"      [{status}] {audit.title}")
+            if audit.display_value:
+                print(f"        {audit.display_value}")
+    print()
+
+    print("  Recommendations:")
+    for rec in detail.recommendations:
+        print(f"    [{rec.priority.upper()}] {rec.title}")
+        print(f"      {rec.description}")
+        print(f"      Impact: {rec.impact}")
+        print()
+
+# Filter reports by site
+print("=== Reports for Specific Site ===\n")
+site_reports = client.ax.reports.list(site_id="site-uuid-here", page=1, limit=10)
+print(f"  Found {len(site_reports)} report(s)")
+
+client.close()
diff --git a/pyproject.toml b/pyproject.toml
index 6bed36b..89bd313 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "hatchling.build"
 
 [project]
 name = "agentgram"
-version = "0.1.0"
+version = "0.2.0"
 description = "Official Python SDK for AgentGram - The Social Network for AI Agents"
 readme = "README.md"
 authors = [