From 0569c734c0d93ce2c21cffdbc9dbb3cdbebdd1c5 Mon Sep 17 00:00:00 2001 From: hagavisi Date: Thu, 18 Dec 2025 14:39:48 +0530 Subject: [PATCH 01/11] Add OCI Autonomous Recovery Service MCP Server --- src/oci-recovery-mcp-server/LICENSE.txt | 35 + src/oci-recovery-mcp-server/README.md | 60 + .../oracle/__init__.py | 6 + .../oci_recovery_mcp_server/__init__.py | 8 + .../oracle/oci_recovery_mcp_server/models.py | 1225 +++++++++++++++++ .../oracle/oci_recovery_mcp_server/server.py | 841 +++++++++++ src/oci-recovery-mcp-server/pyproject.toml | 41 + 7 files changed, 2216 insertions(+) create mode 100644 src/oci-recovery-mcp-server/LICENSE.txt create mode 100644 src/oci-recovery-mcp-server/README.md create mode 100644 src/oci-recovery-mcp-server/oracle/__init__.py create mode 100644 src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/__init__.py create mode 100644 src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py create mode 100644 src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py create mode 100644 src/oci-recovery-mcp-server/pyproject.toml diff --git a/src/oci-recovery-mcp-server/LICENSE.txt b/src/oci-recovery-mcp-server/LICENSE.txt new file mode 100644 index 00000000..8dc7c070 --- /dev/null +++ b/src/oci-recovery-mcp-server/LICENSE.txt @@ -0,0 +1,35 @@ +Copyright (c) 2025 Oracle and/or its affiliates. + +The Universal Permissive License (UPL), Version 1.0 + +Subject to the condition set forth below, permission is hereby granted to any +person obtaining a copy of this software, associated documentation and/or data +(collectively the "Software"), free of charge and under any and all copyright +rights in the Software, and any and all patent rights owned or freely +licensable by each licensor hereunder covering either (i) the unmodified +Software as contributed to or provided by such licensor, or (ii) the Larger +Works (as defined below), to deal in both + +(a) the Software, and +(b) any piece of software and/or hardware listed in the lrgrwrks.txt file if +one is included with the Software (each a "Larger Work" to which the Software +is contributed by such licensors), + +without restriction, including without limitation the rights to copy, create +derivative works of, display, perform, and distribute the Software and make, +use, sell, offer for sale, import, export, have made, and have sold the +Software and the Larger Work(s), and to sublicense the foregoing rights on +either these or other terms. + +This license is subject to the following condition: +The above copyright notice and either this complete permission notice or at +a minimum a reference to the UPL must be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file
diff --git a/src/oci-recovery-mcp-server/README.md b/src/oci-recovery-mcp-server/README.md
new file mode 100644
index 00000000..d4d0e6e1
--- /dev/null
+++ b/src/oci-recovery-mcp-server/README.md
@@ -0,0 +1,60 @@
+# OCI Recovery Service MCP Server
+
+An OCI Model Context Protocol (MCP) server that exposes Oracle Database Autonomous Recovery Service operations as MCP tools.
+
+## Features
+
+- List Protected Databases with rich filtering (compartment, lifecycle_state, display_name, id, protection_policy_id, recovery_service_subnet_id, limit, page, sort_order, sort_by, opc_request_id, region).
+- Get a single Protected Database by OCID.
+- Summarize Protected Database health and redo transport status across a compartment.
+- Maps OCI SDK models to Pydantic models for safe, serializable responses.
+
+## Install
+
+From this repository root:
+
+```
+make build
+uv pip install ./src/oci-recovery-mcp-server
+```
+
+Or directly inside the package directory:
+
+```
+cd src/oci-recovery-mcp-server
+uv build
+uv pip install .
+```
+
+## Usage
+
+Run the MCP server (HTTP transport is optional):
+
+```
+# environment variables (optional)
+export ORACLE_MCP_HOST=127.0.0.1
+export ORACLE_MCP_PORT=7337
+
+# run
+uv run oracle.oci-recovery-mcp-server
+```
+
+The server reads OCI auth from your OCI CLI config/profile:
+- Uses the profile in $OCI_CONFIG_PROFILE (defaults to DEFAULT)
+- Uses a security token file signer with the private key specified in the config
+
+## Tools
+
+- list_protected_databases(compartment_id, lifecycle_state=None, display_name=None, id=None, protection_policy_id=None, recovery_service_subnet_id=None, limit=None, page=None, sort_order=None, sort_by=None, opc_request_id=None, region=None) -> list[ProtectedDatabaseSummary]
+- get_protected_database(protected_database_id, opc_request_id=None, region=None) -> ProtectedDatabase
+- summarize_protected_database_health(compartment_id=None, region=None) -> ProtectedDatabaseHealthCounts
+- summarize_protected_database_redo_status(compartment_id=None, region=None) -> ProtectedDatabaseRedoCounts
+- get_compartment_by_name_tool(name) -> str
+
+## Development
+
+- Code style/format/lint/test tasks are managed via the Makefile:
+  - `make build` — builds all sub-packages
+  - `make install` — installs all sub-packages into the current environment
+  - `make test` — runs unit tests
+  - `make lint` — runs linters
+  - `make format` — formats code
+
+## License
+
+Copyright (c) 2025, Oracle and/or its affiliates.
+Licensed under the Universal Permissive License v1.0 as shown at https://oss.oracle.com/licenses/upl.
diff --git a/src/oci-recovery-mcp-server/oracle/__init__.py b/src/oci-recovery-mcp-server/oracle/__init__.py
new file mode 100644
index 00000000..e5a3af0b
--- /dev/null
+++ b/src/oci-recovery-mcp-server/oracle/__init__.py
@@ -0,0 +1,6 @@
+"""
+Copyright (c) 2025, Oracle and/or its affiliates.
+Licensed under the Universal Permissive License v1.0 as shown at
+https://oss.oracle.com/licenses/upl.
+"""
+
diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/__init__.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/__init__.py
new file mode 100644
index 00000000..d07bb5fb
--- /dev/null
+++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/__init__.py
@@ -0,0 +1,8 @@
+"""
+Copyright (c) 2025, Oracle and/or its affiliates.
+Licensed under the Universal Permissive License v1.0 as shown at
+https://oss.oracle.com/licenses/upl.
+"""
+
+__project__ = "oracle.oci-recovery-mcp-server"
+__version__ = "1.0.0"
diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py
new file mode 100644
index 00000000..5831e97b
--- /dev/null
+++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py
@@ -0,0 +1,1225 @@
+"""
+Copyright (c) 2025, Oracle and/or its affiliates.
+Licensed under the Universal Permissive License v1.0 as shown at
+https://oss.oracle.com/licenses/upl.
+""" + +from datetime import datetime +from typing import Any, Dict, Literal, Optional, List + +import oci +from pydantic import BaseModel, Field + + +def _oci_to_dict(obj): + """Best-effort conversion of OCI SDK model objects to plain dicts.""" + if obj is None: + return None + try: + from oci.util import to_dict as oci_to_dict + + return oci_to_dict(obj) + except Exception: + pass + if isinstance(obj, dict): + return obj + if hasattr(obj, "__dict__"): + return {k: v for k, v in obj.__dict__.items() if not k.startswith("_")} + return None + + +def _map_list(items, mapper): + """ + Safely map a sequence of SDK items to a list of Pydantic models using a mapper function. + Returns: + - None if items is None + - [] if items is an empty iterable + """ + if items is None: + return None + try: + return [mapper(it) for it in items] + except Exception: + out: list = [] + try: + for it in items or []: + out.append(mapper(it)) + return out + except Exception: + return None + + +class OCIBaseModel(BaseModel): + """Base model that supports conversion from OCI SDK models.""" + + model_config = {"arbitrary_types_allowed": True} + + +class ProtectedDatabaseHealthCounts(OCIBaseModel): + """ + Aggregated counts of Protected Database health in a compartment/region scope. + """ + compartment_id: Optional[str] = Field( + None, alias="compartmentId", description="The OCID of the compartment summarized." + ) + region: Optional[str] = Field( + None, alias="region", description="The OCI region used for the query (if specified)." + ) + protected: int = Field( + 0, alias="protected", description="Number of Protected Databases with health=PROTECTED." + ) + warning: int = Field( + 0, alias="warning", description="Number of Protected Databases with health=WARNING." + ) + alert: int = Field( + 0, alias="alert", description="Number of Protected Databases with health=ALERT." + ) + total: int = Field( + 0, alias="total", description="Total counted (protected + warning + alert)." + ) + + +class ProtectedDatabaseRedoCounts(OCIBaseModel): + """ + Aggregated counts of redo transport enablement for Protected Databases in a compartment/region scope. + """ + compartment_id: Optional[str] = Field( + None, alias="compartmentId", description="The OCID of the compartment summarized." + ) + region: Optional[str] = Field( + None, alias="region", description="The OCI region used for the query (if specified)." + ) + enabled: int = Field( + 0, alias="enabled", description="Count of Protected Databases with is_redo_logs_enabled = True." + ) + disabled: int = Field( + 0, alias="disabled", description="Count of Protected Databases with is_redo_logs_enabled = False." + ) + total: int = Field( + 0, alias="total", description="Total counted (enabled + disabled)." + ) + + +class ProtectedDatabaseBackupSpaceSum(OCIBaseModel): + """ + Sum of backup space used (GBs) across Protected Databases in a compartment/region scope. + """ + compartment_id: Optional[str] = Field( + None, alias="compartmentId", description="The OCID of the compartment summarized." + ) + region: Optional[str] = Field( + None, alias="region", description="The OCI region used for the query (if specified)." + ) + total_databases_scanned: int = Field( + 0, alias="totalDatabasesScanned", description="Number of Protected Databases scanned." 
+ ) + sum_backup_space_used_in_gbs: float = Field( + 0.0, + alias="sumBackupSpaceUsedInGBs", + description="Sum of metrics.backup_space_used_in_gbs across all scanned Protected Databases.", + ) + + +# region ProtectedDatabase and nested types (oci.recovery.models) + + +class ProtectedDatabaseMetrics(OCIBaseModel): + """ + Pydantic model mirroring metrics object nested under + oci.recovery.models.ProtectedDatabase (if present). + This captures commonly used fields and remains tolerant to service evolution. + """ + + backup_space_used_in_gbs: Optional[float] = Field( + None, + description="Total backup space used by this protected database in GBs.", + ) + database_size_in_gbs: Optional[float] = Field( + None, description="Logical database size in GBs, if reported." + ) + recoverable_window_start_time: Optional[datetime] = Field( + None, description="Start of recoverable window (RFC3339), if reported." + ) + recoverable_window_end_time: Optional[datetime] = Field( + None, description="End of recoverable window (RFC3339), if reported." + ) + latest_backup_time: Optional[datetime] = Field( + None, description="Time of the latest successful backup (RFC3339), if reported." + ) + + +class ProtectedDatabase(OCIBaseModel): + """ + Pydantic model mirroring the fields of oci.recovery.models.ProtectedDatabase. + + This model includes commonly used attributes and remains permissive to + additional fields by relying on Pydantic's default behavior to ignore extras. + """ + + id: Optional[str] = Field(None, description="The OCID of the Protected Database.") + compartment_id: Optional[str] = Field( + None, description="The OCID of the compartment containing this Protected Database." + ) + display_name: Optional[str] = Field( + None, description="A user-friendly name for the Protected Database." + ) + + # Policy and networking attachments + protection_policy_id: Optional[str] = Field( + None, description="The OCID of the attached Protection Policy." + ) + recovery_service_subnet_id: Optional[str] = Field( + None, description="The OCID of the Recovery Service Subnet associated with this database." + ) + + # DB identification (may not always be present for all database types) + database_id: Optional[str] = Field( + None, description="The OCID of the backing database, where applicable." + ) + db_unique_name: Optional[str] = Field( + None, description="The DB_UNIQUE_NAME of the protected database, if available." + ) + vpc_user_name: Optional[str] = Field( + None, description="The VPC user name associated with the protected database, if available." + ) + database_size: Optional[ + Literal["XS", "S", "M", "L", "XL", "XXL", "AUTO", "UNKNOWN_ENUM_VALUE"] + ] = Field( + None, description="Configured database size category for the protected database." + ) + db_name: Optional[str] = Field( + None, description="The database name, if available." + ) + + # Status and health + lifecycle_state: Optional[ + Literal["CREATING", "ACTIVE", "UPDATING", "DELETE_SCHEDULED", "DELETING", "DELETED", "FAILED"] + ] = Field(None, description="The current lifecycle state of the Protected Database.") + lifecycle_details: Optional[str] = Field( + None, description="Additional details about the current lifecycle state." 
+ ) + health: Optional[ + Literal["PROTECTED", "WARNING", "ALERT"] + ] = Field( + None, + description="Service-evaluated health status: PROTECTED, WARNING, or ALERT.", + ) + + # Redo transport (for zero data loss RPO) + is_redo_logs_enabled: Optional[bool] = Field( + None, description="Whether redo transport is enabled for this Protected Database." + ) + + # Metrics + metrics: Optional[ProtectedDatabaseMetrics] = Field( + None, description="Metrics associated with this Protected Database." + ) + + # Timestamps + time_created: Optional[datetime] = Field( + None, description="The time the Protected Database was created (RFC3339)." + ) + time_updated: Optional[datetime] = Field( + None, description="The time the Protected Database was last updated (RFC3339)." + ) + + # Tags + freeform_tags: Optional[Dict[str, str]] = Field( + None, description="Free-form tags for this resource." + ) + defined_tags: Optional[Dict[str, Dict[str, Any]]] = Field( + None, description="Defined tags for this resource." + ) + system_tags: Optional[Dict[str, Dict[str, Any]]] = Field( + None, description="System tags for this resource." + ) + + +def map_protected_database_metrics( + m, +) -> ProtectedDatabaseMetrics | None: + """ + Convert nested metrics object to ProtectedDatabaseMetrics. + Accepts either an OCI SDK model instance or plain dict, returns Pydantic model. + """ + if not m: + return None + data = _oci_to_dict(m) or {} + return ProtectedDatabaseMetrics( + backup_space_used_in_gbs=getattr(m, "backup_space_used_in_gbs", None) + or data.get("backup_space_used_in_gbs") + or data.get("backupSpaceUsedInGbs"), + database_size_in_gbs=getattr(m, "database_size_in_gbs", None) + or data.get("database_size_in_gbs") + or data.get("databaseSizeInGbs"), + recoverable_window_start_time=getattr(m, "recoverable_window_start_time", None) + or data.get("recoverable_window_start_time") + or data.get("recoverableWindowStartTime"), + recoverable_window_end_time=getattr(m, "recoverable_window_end_time", None) + or data.get("recoverable_window_end_time") + or data.get("recoverableWindowEndTime"), + latest_backup_time=getattr(m, "latest_backup_time", None) + or data.get("latest_backup_time") + or data.get("latestBackupTime"), + ) + + +def map_protected_database( + pd: "oci.recovery.models.ProtectedDatabase", +) -> ProtectedDatabase | None: + """ + Convert an oci.recovery.models.ProtectedDatabase to + oracle.oci_recovery_mcp_server.models.ProtectedDatabase, + including nested metrics. + """ + if pd is None: + return None + + # Use getattr first; fall back to dict to be resilient to SDK variations. 
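+    # Caveat: the `or` fallbacks below treat falsy values (False, 0, 0.0, "")
+    # as missing, so e.g. an explicit is_redo_logs_enabled=False can be
+    # mapped to None when the dict lookups also come back empty.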
+ data = _oci_to_dict(pd) or {} + + return ProtectedDatabase( + id=getattr(pd, "id", None) or data.get("id"), + compartment_id=getattr(pd, "compartment_id", None) or data.get("compartment_id"), + display_name=getattr(pd, "display_name", None) or data.get("display_name"), + protection_policy_id=getattr(pd, "protection_policy_id", None) + or data.get("protection_policy_id") + or data.get("protectionPolicyId"), + recovery_service_subnet_id=getattr(pd, "recovery_service_subnet_id", None) + or data.get("recovery_service_subnet_id") + or data.get("recoveryServiceSubnetId"), + database_id=getattr(pd, "database_id", None) or data.get("database_id"), + db_unique_name=getattr(pd, "db_unique_name", None) or data.get("db_unique_name"), + db_name=getattr(pd, "db_name", None) or data.get("db_name"), + lifecycle_state=getattr(pd, "lifecycle_state", None) or data.get("lifecycle_state"), + lifecycle_details=getattr(pd, "lifecycle_details", None) or data.get("lifecycle_details"), + health=getattr(pd, "health", None) or data.get("health"), + is_redo_logs_enabled=getattr(pd, "is_redo_logs_enabled", None) + or data.get("is_redo_logs_enabled") + or data.get("isRedoLogsEnabled"), + metrics=map_protected_database_metrics( + getattr(pd, "metrics", None) or data.get("metrics") + ), + time_created=getattr(pd, "time_created", None) or data.get("time_created"), + time_updated=getattr(pd, "time_updated", None) or data.get("time_updated"), + freeform_tags=getattr(pd, "freeform_tags", None) or data.get("freeform_tags"), + defined_tags=getattr(pd, "defined_tags", None) or data.get("defined_tags"), + system_tags=getattr(pd, "system_tags", None) or data.get("system_tags"), + ) + + +# region RecoveryServiceSubnet and nested types (oci.recovery.models) + + +class RecoveryServiceSubnet(OCIBaseModel): + """ + Pydantic model mirroring the fields of oci.recovery.models.RecoveryServiceSubnet. + """ + + id: Optional[str] = Field( + None, description="The OCID of the Recovery Service Subnet (RSS)." + ) + compartment_id: Optional[str] = Field( + None, description="The OCID of the compartment containing the RSS." + ) + display_name: Optional[str] = Field( + None, description="A user-friendly name for the RSS." + ) + vcn_id: Optional[str] = Field(None, description="The OCID of the VCN.") + subnet_id: Optional[str] = Field(None, description="The OCID of the subnet.") + nsg_ids: Optional[List[str]] = Field( + None, description="List of Network Security Group OCIDs attached to the RSS." + ) + lifecycle_state: Optional[ + Literal["CREATING", "ACTIVE", "UPDATING", "DELETE_SCHEDULED", "DELETING", "DELETED", "FAILED"] + ] = Field(None, description="The current lifecycle state of the RSS.") + lifecycle_details: Optional[str] = Field( + None, description="Additional details about the RSS lifecycle." + ) + + time_created: Optional[datetime] = Field( + None, description="The time the RSS was created (RFC3339)." + ) + time_updated: Optional[datetime] = Field( + None, description="The time the RSS was last updated (RFC3339)." + ) + + freeform_tags: Optional[Dict[str, str]] = Field( + None, description="Free-form tags for this resource." + ) + defined_tags: Optional[Dict[str, Dict[str, Any]]] = Field( + None, description="Defined tags for this resource." + ) + system_tags: Optional[Dict[str, Dict[str, Any]]] = Field( + None, description="System tags for this resource." 
+ ) + + +def map_recovery_service_subnet( + rss: "oci.recovery.models.RecoveryServiceSubnet", +) -> RecoveryServiceSubnet | None: + """ + Convert an oci.recovery.models.RecoveryServiceSubnet to + oracle.oci_recovery_mcp_server.models.RecoveryServiceSubnet. + """ + if rss is None: + return None + + data = _oci_to_dict(rss) or {} + + # Attempt to normalize NSG IDs list from various sources + nsgs = getattr(rss, "nsg_ids", None) or data.get("nsg_ids") or data.get("nsgIds") + if nsgs is not None: + try: + nsgs = list(nsgs) + except Exception: + nsgs = None + + return RecoveryServiceSubnet( + id=getattr(rss, "id", None) or data.get("id"), + compartment_id=getattr(rss, "compartment_id", None) + or data.get("compartment_id") + or data.get("compartmentId"), + display_name=getattr(rss, "display_name", None) + or data.get("display_name") + or data.get("displayName"), + vcn_id=getattr(rss, "vcn_id", None) or data.get("vcn_id") or data.get("vcnId"), + subnet_id=getattr(rss, "subnet_id", None) + or data.get("subnet_id") + or data.get("subnetId"), + nsg_ids=nsgs, + lifecycle_state=getattr(rss, "lifecycle_state", None) + or data.get("lifecycle_state") + or data.get("lifecycleState"), + lifecycle_details=getattr(rss, "lifecycle_details", None) + or data.get("lifecycle_details") + or data.get("lifecycleDetails"), + time_created=getattr(rss, "time_created", None) + or data.get("time_created") + or data.get("timeCreated"), + time_updated=getattr(rss, "time_updated", None) + or data.get("time_updated") + or data.get("timeUpdated"), + freeform_tags=getattr(rss, "freeform_tags", None) + or data.get("freeform_tags") + or data.get("freeformTags"), + defined_tags=getattr(rss, "defined_tags", None) + or data.get("defined_tags") + or data.get("definedTags"), + system_tags=getattr(rss, "system_tags", None) + or data.get("system_tags") + or data.get("systemTags"), + ) + + +# endregion + +# region ProtectionPolicyCollection (oci.recovery.models) + + +class ProtectionPolicyCollection(OCIBaseModel): + """ + Pydantic model mirroring oci.recovery.models.ProtectionPolicyCollection. + """ + + items: Optional[List["ProtectionPolicySummary"]] = Field( + None, description="List of ProtectionPolicySummary items." + ) + + +def map_protection_policy_collection( + coll: "oci.recovery.models.ProtectionPolicyCollection", +) -> ProtectionPolicyCollection | None: + """ + Convert an oci.recovery.models.ProtectionPolicyCollection to + oracle.oci_recovery_mcp_server.models.ProtectionPolicyCollection. + """ + if coll is None: + return None + data = _oci_to_dict(coll) or {} + items = getattr(coll, "items", None) or data.get("items") + return ProtectionPolicyCollection( + items=_map_list(items, map_protection_policy_summary) + ) + + +# endregion + +# region ProtectedDatabase Summary and Collection + + +class ProtectedDatabaseSummary(OCIBaseModel): + """ + Pydantic model mirroring the fields of oci.recovery.models.ProtectedDatabaseSummary. + """ + + id: Optional[str] = Field(None, description="The OCID of the Protected Database.") + compartment_id: Optional[str] = Field( + None, description="The OCID of the compartment containing the Protected Database." + ) + display_name: Optional[str] = Field( + None, description="A user-friendly name for the Protected Database." + ) + protection_policy_id: Optional[str] = Field( + None, description="The OCID of the attached Protection Policy." + ) + recovery_service_subnet_id: Optional[str] = Field( + None, description="The OCID of the Recovery Service Subnet associated with this database." 
+ ) + policy_locked_date_time: Optional[str] = Field( + None, description="Timestamp when the protection policy was locked (RFC3339 string)." + ) + recovery_service_subnets: Optional[List["RecoveryServiceSubnetDetails"]] = Field( + None, description="List of Recovery Service Subnet details associated with this protected database." + ) + database_id: Optional[str] = Field( + None, description="The OCID of the backing database, where applicable." + ) + db_unique_name: Optional[str] = Field( + None, description="The DB_UNIQUE_NAME of the protected database, if available." + ) + vpc_user_name: Optional[str] = Field( + None, description="The VPC user name associated with the protected database, if available." + ) + database_size: Optional[ + Literal["XS", "S", "M", "L", "XL", "XXL", "AUTO", "UNKNOWN_ENUM_VALUE"] + ] = Field( + None, description="Configured database size category." + ) + db_name: Optional[str] = Field(None, description="The database name, if available.") + lifecycle_state: Optional[ + Literal["CREATING", "ACTIVE", "UPDATING", "DELETE_SCHEDULED", "DELETING", "DELETED", "FAILED"] + ] = Field(None, description="The current lifecycle state.") + health: Optional[ + Literal["PROTECTED", "WARNING", "ALERT"] + ] = Field(None, description="Health status.") + lifecycle_details: Optional[str] = Field( + None, description="Detailed description about the current lifecycle state of the protected database." + ) + health_details: Optional[str] = Field( + None, description="A message describing the current health of the protected database." + ) + is_read_only_resource: Optional[bool] = Field( + None, description="Indicates whether the protected database is created by the service (TRUE) or manually (FALSE)." + ) + metrics: Optional["MetricsSummary"] = Field( + None, description="Metrics summary associated with this protected database." + ) + subscription_id: Optional[str] = Field( + None, description="The OCID of the cloud service subscription linked to the protected database." + ) + is_redo_logs_enabled: Optional[bool] = Field( + None, description="Whether redo transport is enabled." + ) + time_created: Optional[datetime] = Field( + None, description="The time the Protected Database was created (RFC3339)." + ) + time_updated: Optional[datetime] = Field( + None, description="The time the Protected Database was last updated (RFC3339)." + ) + freeform_tags: Optional[Dict[str, str]] = Field( + None, description="Free-form tags." + ) + defined_tags: Optional[Dict[str, Dict[str, Any]]] = Field( + None, description="Defined tags." + ) + system_tags: Optional[Dict[str, Dict[str, Any]]] = Field( + None, description="System tags." 
+ ) + + +def map_protected_database_summary( + pds: "oci.recovery.models.ProtectedDatabaseSummary", +) -> ProtectedDatabaseSummary | None: + if pds is None: + return None + data = _oci_to_dict(pds) or {} + rss_in = getattr(pds, "recovery_service_subnets", None) or data.get("recovery_service_subnets") or data.get("recoveryServiceSubnets") + return ProtectedDatabaseSummary( + id=getattr(pds, "id", None) or data.get("id"), + compartment_id=getattr(pds, "compartment_id", None) + or data.get("compartment_id") + or data.get("compartmentId"), + display_name=getattr(pds, "display_name", None) + or data.get("display_name") + or data.get("displayName"), + protection_policy_id=getattr(pds, "protection_policy_id", None) + or data.get("protection_policy_id") + or data.get("protectionPolicyId"), + policy_locked_date_time=getattr(pds, "policy_locked_date_time", None) + or data.get("policy_locked_date_time") + or data.get("policyLockedDateTime"), + recovery_service_subnets=_map_list(rss_in, map_recovery_service_subnet_details), + recovery_service_subnet_id=getattr(pds, "recovery_service_subnet_id", None) + or data.get("recovery_service_subnet_id") + or data.get("recoveryServiceSubnetId"), + database_id=getattr(pds, "database_id", None) + or data.get("database_id") + or data.get("databaseId"), + db_unique_name=getattr(pds, "db_unique_name", None) + or data.get("db_unique_name") + or data.get("dbUniqueName"), + vpc_user_name=getattr(pds, "vpc_user_name", None) + or data.get("vpc_user_name") + or data.get("vpcUserName"), + database_size=getattr(pds, "database_size", None) + or data.get("database_size") + or data.get("databaseSize"), + db_name=getattr(pds, "db_name", None) or data.get("db_name"), + lifecycle_state=getattr(pds, "lifecycle_state", None) + or data.get("lifecycle_state") + or data.get("lifecycleState"), + lifecycle_details=getattr(pds, "lifecycle_details", None) + or data.get("lifecycle_details") + or data.get("lifecycleDetails"), + health=getattr(pds, "health", None) or data.get("health"), + health_details=getattr(pds, "health_details", None) + or data.get("health_details") + or data.get("healthDetails"), + is_read_only_resource=getattr(pds, "is_read_only_resource", None) + or data.get("is_read_only_resource") + or data.get("isReadOnlyResource"), + is_redo_logs_enabled=getattr(pds, "is_redo_logs_enabled", None) + or data.get("is_redo_logs_enabled") + or data.get("isRedoLogsEnabled"), + metrics=map_metrics_summary(getattr(pds, "metrics", None) or data.get("metrics")), + subscription_id=getattr(pds, "subscription_id", None) + or data.get("subscription_id") + or data.get("subscriptionId"), + time_created=getattr(pds, "time_created", None) + or data.get("time_created") + or data.get("timeCreated"), + time_updated=getattr(pds, "time_updated", None) + or data.get("time_updated") + or data.get("timeUpdated"), + freeform_tags=getattr(pds, "freeform_tags", None) + or data.get("freeform_tags") + or data.get("freeformTags"), + defined_tags=getattr(pds, "defined_tags", None) + or data.get("defined_tags") + or data.get("definedTags"), + system_tags=getattr(pds, "system_tags", None) + or data.get("system_tags") + or data.get("systemTags"), + ) + + +class ProtectedDatabaseCollection(OCIBaseModel): + """ + Pydantic model mirroring oci.recovery.models.ProtectedDatabaseCollection. + """ + + items: Optional[List[ProtectedDatabaseSummary]] = Field( + None, description="List of ProtectedDatabaseSummary items." 
+ ) + + +def map_protected_database_collection( + coll: "oci.recovery.models.ProtectedDatabaseCollection", +) -> ProtectedDatabaseCollection | None: + if coll is None: + return None + data = _oci_to_dict(coll) or {} + items = getattr(coll, "items", None) or data.get("items") + return ProtectedDatabaseCollection( + items=_map_list(items, map_protected_database_summary) + ) + + +# endregion + + +# region RecoveryServiceSubnet Details/Input/Summary/Collection + + +class RecoveryServiceSubnetDetails(OCIBaseModel): + """ + Pydantic model mirroring oci.recovery.models.RecoveryServiceSubnetDetails. + Represents a detailed view of RSS properties. + """ + + id: Optional[str] = Field(None, description="The OCID of the RSS.") + compartment_id: Optional[str] = Field( + None, description="The OCID of the compartment containing the RSS." + ) + display_name: Optional[str] = Field(None, description="A user-friendly name.") + vcn_id: Optional[str] = Field(None, description="The OCID of the VCN.") + subnet_id: Optional[str] = Field(None, description="The OCID of the subnet.") + nsg_ids: Optional[List[str]] = Field( + None, description="List of NSG OCIDs associated to the RSS." + ) + lifecycle_state: Optional[ + Literal["CREATING", "ACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED"] + ] = Field(None, description="The current lifecycle state.") + lifecycle_details: Optional[str] = Field( + None, description="Additional lifecycle details." + ) + time_created: Optional[datetime] = Field( + None, description="Creation time (RFC3339)." + ) + time_updated: Optional[datetime] = Field( + None, description="Last update time (RFC3339)." + ) + freeform_tags: Optional[Dict[str, str]] = Field(None, description="Free-form tags.") + defined_tags: Optional[Dict[str, Dict[str, Any]]] = Field( + None, description="Defined tags." + ) + system_tags: Optional[Dict[str, Dict[str, Any]]] = Field( + None, description="System tags." 
+ ) + + +def map_recovery_service_subnet_details( + det: "oci.recovery.models.RecoveryServiceSubnetDetails", +) -> RecoveryServiceSubnetDetails | None: + if det is None: + return None + data = _oci_to_dict(det) or {} + nsgs = getattr(det, "nsg_ids", None) or data.get("nsg_ids") or data.get("nsgIds") + if nsgs is not None: + try: + nsgs = list(nsgs) + except Exception: + nsgs = None + return RecoveryServiceSubnetDetails( + id=getattr(det, "id", None) or data.get("id"), + compartment_id=getattr(det, "compartment_id", None) + or data.get("compartment_id") + or data.get("compartmentId"), + display_name=getattr(det, "display_name", None) + or data.get("display_name") + or data.get("displayName"), + vcn_id=getattr(det, "vcn_id", None) or data.get("vcn_id") or data.get("vcnId"), + subnet_id=getattr(det, "subnet_id", None) + or data.get("subnet_id") + or data.get("subnetId"), + nsg_ids=nsgs, + lifecycle_state=getattr(det, "lifecycle_state", None) + or data.get("lifecycle_state") + or data.get("lifecycleState"), + lifecycle_details=getattr(det, "lifecycle_details", None) + or data.get("lifecycle_details") + or data.get("lifecycleDetails"), + time_created=getattr(det, "time_created", None) + or data.get("time_created") + or data.get("timeCreated"), + time_updated=getattr(det, "time_updated", None) + or data.get("time_updated") + or data.get("timeUpdated"), + freeform_tags=getattr(det, "freeform_tags", None) + or data.get("freeform_tags") + or data.get("freeformTags"), + defined_tags=getattr(det, "defined_tags", None) + or data.get("defined_tags") + or data.get("definedTags"), + system_tags=getattr(det, "system_tags", None) + or data.get("system_tags") + or data.get("systemTags"), + ) + + +class RecoveryServiceSubnetInput(OCIBaseModel): + """ + Pydantic model mirroring oci.recovery.models.RecoveryServiceSubnetInput. + Represents the payload to create/update a Recovery Service Subnet. + """ + + display_name: Optional[str] = Field( + None, description="A user-friendly name for the RSS." + ) + compartment_id: Optional[str] = Field( + None, description="The OCID of the compartment for the RSS." + ) + vcn_id: Optional[str] = Field(None, description="The OCID of the VCN.") + subnet_id: Optional[str] = Field(None, description="The OCID of the subnet.") + nsg_ids: Optional[List[str]] = Field( + None, description="List of NSG OCIDs to associate." + ) + freeform_tags: Optional[Dict[str, str]] = Field(None, description="Free-form tags.") + defined_tags: Optional[Dict[str, Dict[str, Any]]] = Field( + None, description="Defined tags." 
+ ) + + +def map_recovery_service_subnet_input( + inp: "oci.recovery.models.RecoveryServiceSubnetInput", +) -> RecoveryServiceSubnetInput | None: + if inp is None: + return None + data = _oci_to_dict(inp) or {} + nsgs = getattr(inp, "nsg_ids", None) or data.get("nsg_ids") or data.get("nsgIds") + if nsgs is not None: + try: + nsgs = list(nsgs) + except Exception: + nsgs = None + return RecoveryServiceSubnetInput( + display_name=getattr(inp, "display_name", None) + or data.get("display_name") + or data.get("displayName"), + compartment_id=getattr(inp, "compartment_id", None) + or data.get("compartment_id") + or data.get("compartmentId"), + vcn_id=getattr(inp, "vcn_id", None) or data.get("vcn_id") or data.get("vcnId"), + subnet_id=getattr(inp, "subnet_id", None) + or data.get("subnet_id") + or data.get("subnetId"), + nsg_ids=nsgs, + freeform_tags=getattr(inp, "freeform_tags", None) + or data.get("freeform_tags") + or data.get("freeformTags"), + defined_tags=getattr(inp, "defined_tags", None) + or data.get("defined_tags") + or data.get("definedTags"), + ) + + +class RecoveryServiceSubnetSummary(OCIBaseModel): + """ + Pydantic model mirroring oci.recovery.models.RecoveryServiceSubnetSummary. + """ + + id: Optional[str] = Field(None, description="The OCID of the RSS.") + compartment_id: Optional[str] = Field( + None, description="The OCID of the compartment containing the RSS." + ) + display_name: Optional[str] = Field(None, description="A user-friendly name.") + vcn_id: Optional[str] = Field(None, description="The OCID of the VCN.") + subnet_id: Optional[str] = Field(None, description="The OCID of the subnet.") + nsg_ids: Optional[List[str]] = Field(None, description="List of NSG OCIDs.") + lifecycle_state: Optional[ + Literal["CREATING", "ACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED"] + ] = Field(None, description="The current lifecycle state.") + time_created: Optional[datetime] = Field( + None, description="Creation time (RFC3339)." + ) + freeform_tags: Optional[Dict[str, str]] = Field(None, description="Free-form tags.") + defined_tags: Optional[Dict[str, Dict[str, Any]]] = Field( + None, description="Defined tags." + ) + system_tags: Optional[Dict[str, Dict[str, Any]]] = Field( + None, description="System tags." 
+ ) + + +def map_recovery_service_subnet_summary( + rss: "oci.recovery.models.RecoveryServiceSubnetSummary", +) -> RecoveryServiceSubnetSummary | None: + if rss is None: + return None + data = _oci_to_dict(rss) or {} + nsgs = getattr(rss, "nsg_ids", None) or data.get("nsg_ids") or data.get("nsgIds") + if nsgs is not None: + try: + nsgs = list(nsgs) + except Exception: + nsgs = None + return RecoveryServiceSubnetSummary( + id=getattr(rss, "id", None) or data.get("id"), + compartment_id=getattr(rss, "compartment_id", None) + or data.get("compartment_id") + or data.get("compartmentId"), + display_name=getattr(rss, "display_name", None) + or data.get("display_name") + or data.get("displayName"), + vcn_id=getattr(rss, "vcn_id", None) or data.get("vcn_id") or data.get("vcnId"), + subnet_id=getattr(rss, "subnet_id", None) + or data.get("subnet_id") + or data.get("subnetId"), + nsg_ids=nsgs, + lifecycle_state=getattr(rss, "lifecycle_state", None) + or data.get("lifecycle_state") + or data.get("lifecycleState"), + time_created=getattr(rss, "time_created", None) + or data.get("time_created") + or data.get("timeCreated"), + freeform_tags=getattr(rss, "freeform_tags", None) + or data.get("freeform_tags") + or data.get("freeformTags"), + defined_tags=getattr(rss, "defined_tags", None) + or data.get("defined_tags") + or data.get("definedTags"), + system_tags=getattr(rss, "system_tags", None) + or data.get("system_tags") + or data.get("systemTags"), + ) + + +class RecoveryServiceSubnetCollection(OCIBaseModel): + """ + Pydantic model mirroring oci.recovery.models.RecoveryServiceSubnetCollection. + """ + + items: Optional[List[RecoveryServiceSubnetSummary]] = Field( + None, description="List of RecoveryServiceSubnetSummary items." + ) + + +def map_recovery_service_subnet_collection( + coll: "oci.recovery.models.RecoveryServiceSubnetCollection", +) -> RecoveryServiceSubnetCollection | None: + if coll is None: + return None + data = _oci_to_dict(coll) or {} + items = getattr(coll, "items", None) or data.get("items") + return RecoveryServiceSubnetCollection( + items=_map_list(items, map_recovery_service_subnet_summary) + ) + + +# endregion + +# region Metrics and MetricsSummary (oci.recovery.models) + + +class Metrics(OCIBaseModel): + """ + Pydantic model mirroring oci.recovery.models.Metrics. + Captures common Recovery metrics fields and remains tolerant to service evolution. + """ + + backup_space_used_in_gbs: Optional[float] = Field( + None, description="Total backup space used in GBs." + ) + database_size_in_gbs: Optional[float] = Field( + None, description="Logical database size in GBs, if reported." + ) + recoverable_window_start_time: Optional[datetime] = Field( + None, description="Start of recoverable window (RFC3339), if reported." + ) + recoverable_window_end_time: Optional[datetime] = Field( + None, description="End of recoverable window (RFC3339), if reported." + ) + latest_backup_time: Optional[datetime] = Field( + None, description="Time of the latest successful backup (RFC3339), if reported." + ) + + +def map_metrics(m) -> Metrics | None: + """ + Convert an oci.recovery.models.Metrics (or dict-like) to Metrics. 
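+    Note: the `or`-chain fallbacks treat zero as missing, so a metric that is
+    exactly 0/0.0 may come back as None rather than zero.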
+ """ + if not m: + return None + data = _oci_to_dict(m) or {} + return Metrics( + backup_space_used_in_gbs=getattr(m, "backup_space_used_in_gbs", None) + or data.get("backup_space_used_in_gbs") + or data.get("backupSpaceUsedInGbs"), + database_size_in_gbs=getattr(m, "database_size_in_gbs", None) + or data.get("database_size_in_gbs") + or data.get("databaseSizeInGbs"), + recoverable_window_start_time=getattr( + m, "recoverable_window_start_time", None + ) + or data.get("recoverable_window_start_time") + or data.get("recoverableWindowStartTime"), + recoverable_window_end_time=getattr(m, "recoverable_window_end_time", None) + or data.get("recoverable_window_end_time") + or data.get("recoverableWindowEndTime"), + latest_backup_time=getattr(m, "latest_backup_time", None) + or data.get("latest_backup_time") + or data.get("latestBackupTime"), + ) + + +class MetricsSummary(OCIBaseModel): + """ + Pydantic model mirroring oci.recovery.models.MetricsSummary. + Contains a summarized view of recovery metrics over a period or scope. + """ + + backup_space_used_in_gbs: Optional[float] = Field( + None, description="Total backup space used in GBs for the scope of the summary." + ) + database_size_in_gbs: Optional[float] = Field( + None, description="Logical database size in GBs for the scope of the summary." + ) + recoverable_window_start_time: Optional[datetime] = Field( + None, + description="Start of recoverable window (RFC3339) covered by the summary, if reported.", + ) + recoverable_window_end_time: Optional[datetime] = Field( + None, + description="End of recoverable window (RFC3339) covered by the summary, if reported.", + ) + latest_backup_time: Optional[datetime] = Field( + None, + description="Time of the latest successful backup (RFC3339) in the summary window, if reported.", + ) + + +def map_metrics_summary(ms) -> MetricsSummary | None: + """ + Convert an oci.recovery.models.MetricsSummary (or dict-like) to MetricsSummary. + """ + if not ms: + return None + data = _oci_to_dict(ms) or {} + return MetricsSummary( + backup_space_used_in_gbs=getattr(ms, "backup_space_used_in_gbs", None) + or data.get("backup_space_used_in_gbs") + or data.get("backupSpaceUsedInGbs"), + database_size_in_gbs=getattr(ms, "database_size_in_gbs", None) + or data.get("database_size_in_gbs") + or data.get("databaseSizeInGbs"), + recoverable_window_start_time=getattr( + ms, "recoverable_window_start_time", None + ) + or data.get("recoverable_window_start_time") + or data.get("recoverableWindowStartTime"), + recoverable_window_end_time=getattr(ms, "recoverable_window_end_time", None) + or data.get("recoverable_window_end_time") + or data.get("recoverableWindowEndTime"), + latest_backup_time=getattr(ms, "latest_backup_time", None) + or data.get("latest_backup_time") + or data.get("latestBackupTime"), + ) + + +# endregion + +# region ProtectionPolicy (full) (oci.recovery.models) + + +class ProtectionPolicy(OCIBaseModel): + """ + Pydantic model mirroring the fields of oci.recovery.models.ProtectionPolicy. + Named ProtectionPolicy here as requested. + """ + + id: Optional[str] = Field( + None, description="The OCID of the protection policy." + ) + display_name: Optional[str] = Field( + None, description="A user-friendly name for the protection policy." + ) + compartment_id: Optional[str] = Field( + None, description="The OCID of the compartment containing the protection policy." + ) + backup_retention_period_in_days: Optional[int] = Field( + None, description="Exact number of days to retain backups created by Recovery Service." 
+    )
+    is_predefined_policy: Optional[bool] = Field(
+        None, description="Whether this is an Oracle-defined predefined policy."
+    )
+    policy_locked_date_time: Optional[str] = Field(
+        None, description="When the protection policy was locked (RFC3339 string)."
+    )
+    must_enforce_cloud_locality: Optional[bool] = Field(
+        None, description="Whether backup storage must stay in the same cloud locality as the database."
+    )
+    time_created: Optional[datetime] = Field(
+        None, description="The time the protection policy was created (RFC3339)."
+    )
+    time_updated: Optional[datetime] = Field(
+        None, description="The time the protection policy was last updated (RFC3339)."
+    )
+    lifecycle_state: Optional[
+        Literal[
+            "CREATING",
+            "UPDATING",
+            "ACTIVE",
+            "DELETE_SCHEDULED",
+            "DELETING",
+            "DELETED",
+            "FAILED",
+            "UNKNOWN_ENUM_VALUE",
+        ]
+    ] = Field(None, description="The current lifecycle state of the protection policy.")
+    lifecycle_details: Optional[str] = Field(
+        None, description="Additional details about the current lifecycle state."
+    )
+    freeform_tags: Optional[Dict[str, str]] = Field(
+        None, description="Free-form tags for this resource."
+    )
+    defined_tags: Optional[Dict[str, Dict[str, Any]]] = Field(
+        None, description="Defined tags for this resource."
+    )
+    system_tags: Optional[Dict[str, Dict[str, Any]]] = Field(
+        None, description="System tags for this resource."
+    )
+
+
+def map_protection_policy(
+    pp: "oci.recovery.models.ProtectionPolicy",
+) -> ProtectionPolicy | None:
+    """
+    Convert an oci.recovery.models.ProtectionPolicy to
+    oracle.oci_recovery_mcp_server.models.ProtectionPolicy.
+    """
+    if pp is None:
+        return None
+
+    data = _oci_to_dict(pp) or {}
+
+    return ProtectionPolicy(
+        id=getattr(pp, "id", None) or data.get("id"),
+        display_name=getattr(pp, "display_name", None)
+        or data.get("display_name")
+        or data.get("displayName"),
+        compartment_id=getattr(pp, "compartment_id", None)
+        or data.get("compartment_id")
+        or data.get("compartmentId"),
+        backup_retention_period_in_days=getattr(
+            pp, "backup_retention_period_in_days", None
+        )
+        or data.get("backup_retention_period_in_days")
+        or data.get("backupRetentionPeriodInDays"),
+        is_predefined_policy=getattr(pp, "is_predefined_policy", None)
+        or data.get("is_predefined_policy")
+        or data.get("isPredefinedPolicy"),
+        policy_locked_date_time=getattr(pp, "policy_locked_date_time", None)
+        or data.get("policy_locked_date_time")
+        or data.get("policyLockedDateTime"),
+        must_enforce_cloud_locality=getattr(
+            pp, "must_enforce_cloud_locality", None
+        )
+        or data.get("must_enforce_cloud_locality")
+        or data.get("mustEnforceCloudLocality"),
+        time_created=getattr(pp, "time_created", None)
+        or data.get("time_created")
+        or data.get("timeCreated"),
+        time_updated=getattr(pp, "time_updated", None)
+        or data.get("time_updated")
+        or data.get("timeUpdated"),
+        lifecycle_state=getattr(pp, "lifecycle_state", None)
+        or data.get("lifecycle_state")
+        or data.get("lifecycleState"),
+        lifecycle_details=getattr(pp, "lifecycle_details", None)
+        or data.get("lifecycle_details")
+        or data.get("lifecycleDetails"),
+        freeform_tags=getattr(pp, "freeform_tags", None)
+        or data.get("freeform_tags")
+        or data.get("freeformTags"),
+        defined_tags=getattr(pp, "defined_tags", None)
+        or data.get("defined_tags")
+        or data.get("definedTags"),
+        system_tags=getattr(pp, "system_tags", None)
+        or data.get("system_tags")
+        or data.get("systemTags"),
+    )
+
+
+# endregion
+
+# region ProtectionPolicySummary (oci.recovery.models)
+
+
+class ProtectionPolicySummary(OCIBaseModel):
+    """
+    Pydantic model mirroring oci.recovery.models.ProtectionPolicySummary.
+    """
+
+    id: Optional[str] = Field(
+        None, description="The OCID of the protection policy."
+    )
+    display_name: Optional[str] = Field(
+        None, description="A user-friendly name for the protection policy."
+    )
+    compartment_id: Optional[str] = Field(
+        None, description="The OCID of the compartment containing the protection policy."
+    )
+    backup_retention_period_in_days: Optional[int] = Field(
+        None, description="Exact number of days to retain backups created by Recovery Service."
+    )
+    is_predefined_policy: Optional[bool] = Field(
+        None, description="Whether this is an Oracle-defined predefined policy."
+    )
+    policy_locked_date_time: Optional[str] = Field(
+        None, description="When the protection policy was locked (RFC3339 string)."
+    )
+    must_enforce_cloud_locality: Optional[bool] = Field(
+        None, description="Whether backup storage must stay in the same cloud locality as the database."
+    )
+    time_created: Optional[datetime] = Field(
+        None, description="The time the protection policy was created (RFC3339)."
+    )
+    time_updated: Optional[datetime] = Field(
+        None, description="The time the protection policy was last updated (RFC3339)."
+    )
+    lifecycle_state: Optional[
+        Literal[
+            "CREATING",
+            "UPDATING",
+            "ACTIVE",
+            "DELETE_SCHEDULED",
+            "DELETING",
+            "DELETED",
+            "FAILED",
+            "UNKNOWN_ENUM_VALUE",
+        ]
+    ] = Field(None, description="The current lifecycle state of the protection policy.")
+    lifecycle_details: Optional[str] = Field(
+        None, description="Additional details about the current lifecycle state."
+    )
+    freeform_tags: Optional[Dict[str, str]] = Field(
+        None, description="Free-form tags for this resource."
+    )
+    defined_tags: Optional[Dict[str, Dict[str, Any]]] = Field(
+        None, description="Defined tags for this resource."
+    )
+    system_tags: Optional[Dict[str, Dict[str, Any]]] = Field(
+        None, description="System tags for this resource."
+    )
+
+
+def map_protection_policy_summary(
+    pps: "oci.recovery.models.ProtectionPolicySummary",
+) -> ProtectionPolicySummary | None:
+    """
+    Convert an oci.recovery.models.ProtectionPolicySummary to
+    oracle.oci_recovery_mcp_server.models.ProtectionPolicySummary.
+ """ + if pps is None: + return None + + data = _oci_to_dict(pps) or {} + + return ProtectionPolicySummary( + id=getattr(pps, "id", None) or data.get("id"), + display_name=getattr(pps, "display_name", None) + or data.get("display_name") + or data.get("displayName"), + compartment_id=getattr(pps, "compartment_id", None) + or data.get("compartment_id") + or data.get("compartmentId"), + backup_retention_period_in_days=getattr( + pps, "backup_retention_period_in_days", None + ) + or data.get("backup_retention_period_in_days") + or data.get("backupRetentionPeriodInDays"), + is_predefined_policy=getattr(pps, "is_predefined_policy", None) + or data.get("is_predefined_policy") + or data.get("isPredefinedPolicy"), + policy_locked_date_time=getattr(pps, "policy_locked_date_time", None) + or data.get("policy_locked_date_time") + or data.get("policyLockedDateTime"), + must_enforce_cloud_locality=getattr( + pps, "must_enforce_cloud_locality", None + ) + or data.get("must_enforce_cloud_locality") + or data.get("mustEnforceCloudLocality"), + time_created=getattr(pps, "time_created", None) + or data.get("time_created") + or data.get("timeCreated"), + time_updated=getattr(pps, "time_updated", None) + or data.get("time_updated") + or data.get("timeUpdated"), + lifecycle_state=getattr(pps, "lifecycle_state", None) + or data.get("lifecycle_state") + or data.get("lifecycleState"), + lifecycle_details=getattr(pps, "lifecycle_details", None) + or data.get("lifecycle_details") + or data.get("lifecycleDetails"), + freeform_tags=getattr(pps, "freeform_tags", None) + or data.get("freeform_tags") + or data.get("freeformTags"), + defined_tags=getattr(pps, "defined_tags", None) + or data.get("defined_tags") + or data.get("definedTags"), + system_tags=getattr(pps, "system_tags", None) + or data.get("system_tags") + or data.get("systemTags"), + ) + + +# endregion diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py new file mode 100644 index 00000000..3aae981a --- /dev/null +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py @@ -0,0 +1,841 @@ +""" +Copyright (c) 2025, Oracle and/or its affiliates. +Licensed under the Universal Permissive License v1.0 as shown at +https://oss.oracle.com/licenses/upl. +""" + +import os +import json +from logging import Logger +from typing import Annotated, Optional + +import oci +from fastmcp import FastMCP +from oci.monitoring.models import SummarizeMetricsDataDetails + +from oracle.oci_recovery_mcp_server.models import ( + ProtectedDatabaseSummary, + map_protected_database_summary, + ProtectedDatabase, + map_protected_database, + ProtectionPolicySummary, + map_protection_policy_summary, + ProtectionPolicy, + map_protection_policy, + RecoveryServiceSubnetSummary, + map_recovery_service_subnet_summary, + RecoveryServiceSubnet, + map_recovery_service_subnet, + ProtectedDatabaseHealthCounts, + ProtectedDatabaseRedoCounts, + ProtectedDatabaseBackupSpaceSum, +) +from . import __project__, __version__ + +logger = Logger(__name__, level="INFO") +mcp = FastMCP(name=__project__) + + +def get_recovery_client(region: str | None = None) -> oci.recovery.DatabaseRecoveryClient: + """ + Initialize DatabaseRecoveryClient using the OCI config and a SecurityTokenSigner. + Adds a custom user agent derived from the package name and version. + Optionally overrides the region. 
+ """ + config = oci.config.from_file( + profile_name=os.getenv("OCI_CONFIG_PROFILE", oci.config.DEFAULT_PROFILE) + ) + user_agent_name = __project__.split("oracle.", 1)[1].split("-server", 1)[0] + config["additional_user_agent"] = f"{user_agent_name}/{__version__}" + + private_key = oci.signer.load_private_key_from_file(config["key_file"]) + token_file = config["security_token_file"] + with open(token_file, "r") as f: + token = f.read() + signer = oci.auth.signers.SecurityTokenSigner(token, private_key) + + if region is None: + return oci.recovery.DatabaseRecoveryClient(config, signer=signer) + + regional_config = config.copy() + regional_config["region"] = region + return oci.recovery.DatabaseRecoveryClient(regional_config, signer=signer) + +def get_identity_client(): + config = oci.config.from_file( + profile_name=os.getenv("OCI_CONFIG_PROFILE", oci.config.DEFAULT_PROFILE) + ) + user_agent_name = __project__.split("oracle.", 1)[1].split("-server", 1)[0] + config["additional_user_agent"] = f"{user_agent_name}/{__version__}" + private_key = oci.signer.load_private_key_from_file(config["key_file"]) + token_file = config["security_token_file"] + with open(token_file, "r") as f: + token = f.read() + signer = oci.auth.signers.SecurityTokenSigner(token, private_key) + return oci.identity.IdentityClient(config, signer=signer) + +def get_database_client(region: str = None): + config = oci.config.from_file( + profile_name=os.getenv("OCI_CONFIG_PROFILE", oci.config.DEFAULT_PROFILE) + ) + user_agent_name = __project__.split("oracle.", 1)[1].split("-server", 1)[0] + config["additional_user_agent"] = f"{user_agent_name}/{__version__}" + private_key = oci.signer.load_private_key_from_file(config["key_file"]) + token_file = config["security_token_file"] + with open(token_file, "r") as f: + token = f.read() + signer = oci.auth.signers.SecurityTokenSigner(token, private_key) + if region is None: + return oci.database.DatabaseClient(config, signer=signer) + regional_config = config.copy() # make a shallow copy + regional_config["region"] = region + return oci.database.DatabaseClient(regional_config, signer=signer) + +def get_monitoring_client(): + logger.info("entering get_monitoring_client") + config = oci.config.from_file( + profile_name=os.getenv("OCI_CONFIG_PROFILE", oci.config.DEFAULT_PROFILE) + ) + user_agent_name = __project__.split("oracle.", 1)[1].split("-server", 1)[0] + config["additional_user_agent"] = f"{user_agent_name}/{__version__}" + + private_key = oci.signer.load_private_key_from_file(config["key_file"]) + token_file = config["security_token_file"] + token = None + with open(token_file, "r") as f: + token = f.read() + signer = oci.auth.signers.SecurityTokenSigner(token, private_key) + return oci.monitoring.MonitoringClient(config, signer=signer) + +def get_tenancy(): + config = oci.config.from_file( + profile_name=os.getenv("OCI_CONFIG_PROFILE", oci.config.DEFAULT_PROFILE) + ) + return os.getenv("TENANCY_ID_OVERRIDE", config["tenancy"]) + + +def list_all_compartments_internal(only_one_page: bool, limit=100): + """Internal function to get List all compartments in a tenancy""" + identity_client = get_identity_client() + response = identity_client.list_compartments( + compartment_id=get_tenancy(), + compartment_id_in_subtree=True, + access_level="ACCESSIBLE", + lifecycle_state="ACTIVE", + limit=limit, + ) + compartments = response.data + compartments.append( + identity_client.get_compartment(compartment_id=get_tenancy()).data + ) + if only_one_page: # limiting the number of items returned 
+        return compartments
+    while response.has_next_page:
+        response = identity_client.list_compartments(
+            compartment_id=get_tenancy(),
+            compartment_id_in_subtree=True,
+            access_level="ACCESSIBLE",
+            lifecycle_state="ACTIVE",
+            page=response.next_page,
+            limit=limit,
+        )
+        compartments.extend(response.data)
+
+    return compartments
+
+
+def get_compartment_by_name(compartment_name: str):
+    """Internal function that finds a compartment by name (case-insensitive); scans all compartments on each call."""
+    compartments = list_all_compartments_internal(False)
+    # Search for the compartment by name
+    for compartment in compartments:
+        if compartment.name.lower() == compartment_name.lower():
+            return compartment
+
+    return None
+
+@mcp.tool()
+def get_compartment_by_name_tool(name: str) -> str:
+    """Return the compartment matching the provided name, or a JSON error if not found."""
+    compartment = get_compartment_by_name(name)
+    if compartment:
+        return str(compartment)
+    else:
+        return json.dumps({"error": f"Compartment '{name}' not found."})
+
+@mcp.tool(
+    description="List Protected Databases in a given compartment with optional filters."
+)
+def list_protected_databases(
+    compartment_id: Annotated[str, "The OCID of the compartment"],
+    lifecycle_state: Annotated[
+        Optional[str],
+        'Filter by lifecycle state (e.g., "CREATING", "UPDATING", "ACTIVE", "DELETE_SCHEDULED", "DELETING", "DELETED", "FAILED")',
+    ] = None,
+    display_name: Annotated[Optional[str], "Exact match on display name"] = None,
+    id: Annotated[Optional[str], "Protected Database OCID"] = None,
+    protection_policy_id: Annotated[
+        Optional[str], "Filter results to this Protection Policy OCID"
+    ] = None,
+    recovery_service_subnet_id: Annotated[
+        Optional[str], "Filter by Recovery Service Subnet OCID"
+    ] = None,
+    limit: Annotated[Optional[int], "Maximum number of items per page"] = None,
+    page: Annotated[
+        Optional[str],
+        "Pagination token (opc-next-page) to continue listing from",
+    ] = None,
+    sort_order: Annotated[
+        Optional[str], 'Sort order: "ASC" or "DESC"'
+    ] = None,
+    sort_by: Annotated[
+        Optional[str], 'Sort by field: "timeCreated" or "displayName"'
+    ] = None,
+    opc_request_id: Annotated[
+        Optional[str], "Unique identifier for the request"
+    ] = None,
+    region: Annotated[
+        Optional[str], "OCI region to execute the request in (e.g., us-ashburn-1)"
+    ] = None,
+) -> list[ProtectedDatabaseSummary]:
+    """
+    Paginates through Recovery Service to list Protected Databases and returns
+    a list of ProtectedDatabaseSummary models mapped from the OCI SDK response.
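+
+    Note: the loop follows opc-next-page until the listing is exhausted, so
+    `limit` controls the page size rather than capping the total number of
+    results; pass `page` to resume from a previous opc-next-page token.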
+ """ + try: + client = get_recovery_client(region) + + results: list[ProtectedDatabaseSummary] = [] + has_next_page = True + next_page: Optional[str] = page + + while has_next_page: + kwargs = { + "compartment_id": compartment_id, + "page": next_page, + } + if lifecycle_state is not None: + kwargs["lifecycle_state"] = lifecycle_state + if display_name is not None: + kwargs["display_name"] = display_name + if id is not None: + kwargs["id"] = id + if protection_policy_id is not None: + kwargs["protection_policy_id"] = protection_policy_id + if recovery_service_subnet_id is not None: + kwargs["recovery_service_subnet_id"] = recovery_service_subnet_id + if limit is not None: + kwargs["limit"] = limit + if sort_order is not None: + kwargs["sort_order"] = sort_order + if sort_by is not None: + kwargs["sort_by"] = sort_by + if opc_request_id is not None: + kwargs["opc_request_id"] = opc_request_id + + response: oci.response.Response = client.list_protected_databases(**kwargs) + has_next_page = response.has_next_page + next_page = response.next_page if hasattr(response, "next_page") else None + + data = response.data + items = getattr(data, "items", data) # collection.items or raw list + for d in items: + pd_summary = map_protected_database_summary(d) + if pd_summary is not None: + results.append(pd_summary) + + logger.info(f"Found {len(results)} Protected Databases") + return results + + except Exception as e: + logger.error(f"Error in list_protected_databases tool: {str(e)}") + raise + + +@mcp.tool(description="Get a Protected Database by OCID.") +def get_protected_database( + protected_database_id: Annotated[str, "Protected Database OCID"], + opc_request_id: Annotated[Optional[str], "Unique identifier for the request"] = None, + region: Annotated[ + Optional[str], "OCI region to execute the request in (e.g., us-ashburn-1)" + ] = None, +) -> ProtectedDatabase: + """ + Retrieves a single Protected Database resource from Recovery Service and returns + a ProtectedDatabase model mapped from the OCI SDK response. + """ + try: + client = get_recovery_client(region) + + kwargs = {} + if opc_request_id is not None: + kwargs["opc_request_id"] = opc_request_id + + response: oci.response.Response = client.get_protected_database( + protected_database_id=protected_database_id, **kwargs + ) + + data = response.data + pd = map_protected_database(data) + logger.info(f"Fetched Protected Database {protected_database_id}") + return pd + + except Exception as e: + logger.error(f"Error in get_protected_database tool: {str(e)}") + raise + + +@mcp.tool(description="Summarizes Protected Database health status counts (PROTECTED, WARNING, ALERT) in a compartment. Lists protected databases then fetches each to read its health field; returns counts.") +def summarize_protected_database_health( + compartment_id: Annotated[ + Optional[str], + "OCID of the compartment. If omitted, defaults to the tenancy OCID from your OCI profile.", + ] = None, + region: Annotated[ + Optional[str], "OCI region to execute the request in (e.g., us-ashburn-1)" + ] = None, +) -> ProtectedDatabaseHealthCounts: + """ + Summarizes Protected Database health status counts (PROTECTED, WARNING, ALERT) in a compartment. + The tool lists protected databases then fetches each to read its health field; returns counts. 
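+
+    Databases whose health is missing or not one of the three known values are
+    not counted, so the reported total can be lower than the number of
+    protected databases in the compartment.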
+    """
+    try:
+        client = get_recovery_client(region)
+        comp_id = compartment_id or get_tenancy()
+
+        protected = 0
+        warning = 0
+        alert = 0
+
+        has_next_page = True
+        next_page: Optional[str] = None
+
+        while has_next_page:
+            list_kwargs = {
+                "compartment_id": comp_id,
+                "page": next_page,
+            }
+            response: oci.response.Response = client.list_protected_databases(**list_kwargs)
+            has_next_page = response.has_next_page
+            next_page = response.next_page if hasattr(response, "next_page") else None
+
+            data = response.data
+            items = getattr(data, "items", data)
+            for item in items or []:
+                # Every ProtectedDatabaseSummary carries its OCID; skip anything that does not.
+                pd_id = getattr(item, "id", None)
+                if not pd_id:
+                    continue
+
+                pd_resp: oci.response.Response = client.get_protected_database(protected_database_id=pd_id)
+                pd = pd_resp.data
+                health = getattr(pd, "health", None)
+                if not health and hasattr(pd, "__dict__"):
+                    health = pd.__dict__.get("health")
+
+                if health == "PROTECTED":
+                    protected += 1
+                elif health == "WARNING":
+                    warning += 1
+                elif health == "ALERT":
+                    alert += 1
+                else:
+                    # unknown/None health -> not counted in the three buckets
+                    pass
+
+        total = protected + warning + alert
+        logger.info(
+            f"Health summary for compartment {comp_id} (region={region}): "
+            f"PROTECTED={protected}, WARNING={warning}, ALERT={alert}, TOTAL={total}"
+        )
+        return ProtectedDatabaseHealthCounts(
+            compartment_id=comp_id,
+            region=region,
+            protected=protected,
+            warning=warning,
+            alert=alert,
+            total=total,
+        )
+    except Exception as e:
+        logger.error(f"Error in summarize_protected_database_health tool: {str(e)}")
+        raise
+
+
+@mcp.tool(description="Summarizes redo transport enablement for Protected Databases in a compartment. Lists protected databases then fetches each to inspect is_redo_logs_enabled (true=enabled, false=disabled).")
+def summarize_protected_database_redo_status(
+    compartment_id: Annotated[
+        Optional[str],
+        "OCID of the compartment. If omitted, defaults to the tenancy OCID from your OCI profile.",
+    ] = None,
+    region: Annotated[
+        Optional[str], "OCI region to execute the request in (e.g., us-ashburn-1)"
+    ] = None,
+) -> ProtectedDatabaseRedoCounts:
+    """
+    Summarizes redo transport enablement for Protected Databases in a compartment.
+    Lists protected databases then fetches each to inspect is_redo_logs_enabled (true=enabled, false=disabled).
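+
+    Databases where the flag cannot be determined are excluded from both counts,
+    so the total reflects only databases whose redo status is known.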
+    """
+    try:
+        client = get_recovery_client(region)
+        comp_id = compartment_id or get_tenancy()
+
+        enabled = 0
+        disabled = 0
+
+        has_next_page = True
+        next_page: Optional[str] = None
+
+        while has_next_page:
+            list_kwargs = {
+                "compartment_id": comp_id,
+                "page": next_page,
+            }
+            response: oci.response.Response = client.list_protected_databases(**list_kwargs)
+            has_next_page = response.has_next_page
+            next_page = response.next_page if hasattr(response, "next_page") else None
+
+            data = response.data
+            items = getattr(data, "items", data)
+            for item in items or []:
+                # Every ProtectedDatabaseSummary carries its OCID; skip anything that does not.
+                pd_id = getattr(item, "id", None)
+                if not pd_id:
+                    continue
+
+                # Fetch full Protected Database to read is_redo_logs_enabled
+                pd_resp: oci.response.Response = client.get_protected_database(protected_database_id=pd_id)
+                pd = pd_resp.data
+                redo_enabled = getattr(pd, "is_redo_logs_enabled", None)
+                if redo_enabled is None and hasattr(pd, "__dict__"):
+                    redo_enabled = pd.__dict__.get("is_redo_logs_enabled") or pd.__dict__.get("isRedoLogsEnabled")
+
+                if redo_enabled is True:
+                    enabled += 1
+                elif redo_enabled is False:
+                    disabled += 1
+                else:
+                    # None/unknown -> do not count
+                    pass
+
+        total = enabled + disabled
+        logger.info(
+            f"Redo transport summary for compartment {comp_id} (region={region}): "
+            f"ENABLED={enabled}, DISABLED={disabled}, TOTAL={total}"
+        )
+        return ProtectedDatabaseRedoCounts(
+            compartment_id=comp_id,
+            region=region,
+            enabled=enabled,
+            disabled=disabled,
+            total=total,
+        )
+    except Exception as e:
+        logger.error(f"Error in summarize_protected_database_redo_status tool: {str(e)}")
+        raise
+
+
+@mcp.tool(description="Sums backup space used (GB) by Protected Databases in a compartment. Lists protected databases in the compartment, fetches each, reads metrics.backup_space_used_in_gbs (or variants), and returns the total.")
+def summarize_backup_space_used(
+    compartment_id: Annotated[
+        Optional[str],
+        "OCID of the compartment. If omitted, defaults to the tenancy OCID from your OCI profile.",
+    ] = None,
+    region: Annotated[
+        Optional[str],
+        "Canonical OCI region (e.g., us-ashburn-1) to execute the request in.",
+    ] = None,
+) -> ProtectedDatabaseBackupSpaceSum:
+    """
+    Sums backup space used (GB) by Protected Databases in a compartment.
+    Lists protected databases in the compartment, fetches each, reads metrics.backup_space_used_in_gbs
+    (or variants), and returns the total as a ProtectedDatabaseBackupSpaceSum model.
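+
+    Databases that report no usable metric contribute nothing to the sum but
+    are still counted in total_databases_scanned.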
+    """
+    try:
+        client = get_recovery_client(region)
+        comp_id = compartment_id or get_tenancy()
+
+        total_scanned: int = 0
+        sum_gb: float = 0.0
+
+        has_next_page = True
+        next_page: Optional[str] = None
+
+        while has_next_page:
+            list_kwargs = {
+                "compartment_id": comp_id,
+                "page": next_page,
+            }
+            response: oci.response.Response = client.list_protected_databases(**list_kwargs)
+            has_next_page = response.has_next_page
+            next_page = response.next_page if hasattr(response, "next_page") else None
+
+            data = response.data
+            items = getattr(data, "items", data)
+
+            for item in items or []:
+                # Every ProtectedDatabaseSummary carries its OCID; skip anything that does not.
+                pd_id = getattr(item, "id", None)
+                if not pd_id:
+                    continue
+
+                total_scanned += 1
+
+                # Fetch full PD to read metrics
+                pd_resp: oci.response.Response = client.get_protected_database(protected_database_id=pd_id)
+                pd = pd_resp.data
+
+                # Try PD.metrics.backup_space_used_in_gbs (and camelCase variants)
+                metrics = getattr(pd, "metrics", None)
+                val = None
+                if metrics is not None:
+                    val = getattr(metrics, "backup_space_used_in_gbs", None)
+                    if val is None and hasattr(metrics, "__dict__"):
+                        val = metrics.__dict__.get("backup_space_used_in_gbs") or metrics.__dict__.get("backupSpaceUsedInGbs")
+
+                # Fall back to summary metrics on the list item if the full PD had none
+                if val is None:
+                    item_metrics = getattr(item, "metrics", None)
+                    if item_metrics is not None:
+                        val = getattr(item_metrics, "backup_space_used_in_gbs", None)
+                        if val is None and hasattr(item_metrics, "__dict__"):
+                            val = item_metrics.__dict__.get("backup_space_used_in_gbs") or item_metrics.__dict__.get("backupSpaceUsedInGbs")
+
+                # Accumulate only numeric values
+                try:
+                    if val is not None:
+                        sum_gb += float(val)
+                except (TypeError, ValueError):
+                    # Ignore non-numeric values gracefully
+                    pass
+
+        logger.info(
+            f"Backup space used summary for compartment {comp_id} (region={region}): "
+            f"scanned={total_scanned}, sum_gb={sum_gb}"
+        )
+        return ProtectedDatabaseBackupSpaceSum(
+            compartment_id=comp_id,
+            region=region,
+            total_databases_scanned=total_scanned,
+            sum_backup_space_used_in_gbs=sum_gb,
+        )
+    except Exception as e:
+        logger.error(f"Error in summarize_backup_space_used tool: {str(e)}")
+        raise
+
+
+@mcp.tool(description="List Protection Policies in a given compartment with optional filters.")
+def list_protection_policies(
+    compartment_id: Annotated[str, "The OCID of the compartment"],
+    lifecycle_state: Annotated[
+        Optional[str],
+        'Filter by lifecycle state (e.g., "ACTIVE", "DELETED")',
+    ] = None,
+    display_name: Annotated[Optional[str], "Exact match on display name"] = None,
+    id: Annotated[Optional[str], "Protection Policy OCID"] = None,
+    limit: Annotated[Optional[int], "Maximum number of items per page"] = None,
+    page: Annotated[
+        Optional[str],
+        "Pagination token (opc-next-page) to continue listing from",
+    ] = None,
+    sort_order: Annotated[
+        Optional[str], 'Sort order: "ASC" or "DESC"'
+    ] = None,
+    sort_by: Annotated[
+        Optional[str], 'Sort by field: "timeCreated" or "displayName"'
+    ] = None,
+    opc_request_id: Annotated[
+        Optional[str], "Unique identifier for the request"
+    ] = None,
+    region: Annotated[
+        Optional[str], "OCI region to execute the request in (e.g., us-ashburn-1)"
+    ] = None,
+) -> list[ProtectionPolicySummary]:
+    """
+    Paginates through 
Recovery Service to list Protection Policies and returns + a list of ProtectionPolicySummary models mapped from the OCI SDK response. + """ + try: + client = get_recovery_client(region) + + results: list[ProtectionPolicySummary] = [] + has_next_page = True + next_page: Optional[str] = page + + while has_next_page: + kwargs = { + "compartment_id": compartment_id, + "page": next_page, + } + if lifecycle_state is not None: + kwargs["lifecycle_state"] = lifecycle_state + if display_name is not None: + kwargs["display_name"] = display_name + if id is not None: + kwargs["id"] = id + if limit is not None: + kwargs["limit"] = limit + if sort_order is not None: + kwargs["sort_order"] = sort_order + if sort_by is not None: + kwargs["sort_by"] = sort_by + if opc_request_id is not None: + kwargs["opc_request_id"] = opc_request_id + + response: oci.response.Response = client.list_protection_policies(**kwargs) + has_next_page = response.has_next_page + next_page = response.next_page if hasattr(response, "next_page") else None + + data = response.data + items = getattr(data, "items", data) # collection.items or raw list + for d in items: + s = map_protection_policy_summary(d) + if s is not None: + results.append(s) + + logger.info(f"Found {len(results)} Protection Policies") + return results + + except Exception as e: + logger.error(f"Error in list_protection_policies tool: {str(e)}") + raise + + +@mcp.tool(description="Get a Protection Policy by OCID.") +def get_protection_policy( + protection_policy_id: Annotated[str, "Protection Policy OCID"], + opc_request_id: Annotated[Optional[str], "Unique identifier for the request"] = None, + region: Annotated[ + Optional[str], "OCI region to execute the request in (e.g., us-ashburn-1)" + ] = None, +) -> ProtectionPolicy: + """ + Retrieves a single Protection Policy resource from Recovery Service and returns + a ProtectionPolicy model mapped from the OCI SDK response. 
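+
+    Lookup failures surface as OCI SDK service errors (for example, a 404 from
+    oci.exceptions.ServiceError for an unknown OCID); the error is logged and
+    re-raised to the caller.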
+ """ + try: + client = get_recovery_client(region) + + kwargs = {} + if opc_request_id is not None: + kwargs["opc_request_id"] = opc_request_id + + response: oci.response.Response = client.get_protection_policy( + protection_policy_id=protection_policy_id, **kwargs + ) + + data = response.data + pp = map_protection_policy(data) + logger.info(f"Fetched Protection Policy {protection_policy_id}") + return pp + + except Exception as e: + logger.error(f"Error in get_protection_policy tool: {str(e)}") + raise + + +@mcp.tool(description="List Recovery Service Subnets in a given compartment with optional filters.") +def list_recovery_service_subnets( + compartment_id: Annotated[str, "The OCID of the compartment"], + lifecycle_state: Annotated[ + Optional[str], + 'Filter by lifecycle state (e.g., "CREATING", "ACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED")', + ] = None, + display_name: Annotated[Optional[str], "Exact match on display name"] = None, + id: Annotated[Optional[str], "Recovery Service Subnet OCID"] = None, + vcn_id: Annotated[Optional[str], "Filter by VCN OCID"] = None, + limit: Annotated[Optional[int], "Maximum number of items per page"] = None, + page: Annotated[ + Optional[str], + "Pagination token (opc-next-page) to continue listing from", + ] = None, + sort_order: Annotated[ + Optional[str], 'Sort order: "ASC" or "DESC"' + ] = None, + sort_by: Annotated[ + Optional[str], 'Sort by field: "timeCreated" or "displayName"' + ] = None, + opc_request_id: Annotated[ + Optional[str], "Unique identifier for the request" + ] = None, + region: Annotated[ + Optional[str], "OCI region to execute the request in (e.g., us-ashburn-1)" + ] = None, +) -> list[RecoveryServiceSubnetSummary]: + """ + Paginates through Recovery Service to list Recovery Service Subnets and returns + a list of RecoveryServiceSubnetSummary models mapped from the OCI SDK response. 
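+
+    As with the other list tools, opc-next-page tokens are followed until the
+    listing is exhausted, so `limit` bounds the page size rather than the
+    total number of results.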
+    """
+    try:
+        client = get_recovery_client(region)
+
+        results: list[RecoveryServiceSubnetSummary] = []
+        has_next_page = True
+        next_page: Optional[str] = page
+
+        while has_next_page:
+            kwargs = {
+                "compartment_id": compartment_id,
+                "page": next_page,
+            }
+            if lifecycle_state is not None:
+                kwargs["lifecycle_state"] = lifecycle_state
+            if display_name is not None:
+                kwargs["display_name"] = display_name
+            if id is not None:
+                kwargs["id"] = id
+            if vcn_id is not None:
+                kwargs["vcn_id"] = vcn_id
+            if limit is not None:
+                kwargs["limit"] = limit
+            if sort_order is not None:
+                kwargs["sort_order"] = sort_order
+            if sort_by is not None:
+                kwargs["sort_by"] = sort_by
+            if opc_request_id is not None:
+                kwargs["opc_request_id"] = opc_request_id
+
+            response: oci.response.Response = client.list_recovery_service_subnets(**kwargs)
+            has_next_page = response.has_next_page
+            next_page = response.next_page if hasattr(response, "next_page") else None
+
+            data = response.data
+            items = getattr(data, "items", data)  # collection.items or raw list
+            for d in items:
+                s = map_recovery_service_subnet_summary(d)
+                if s is not None:
+                    results.append(s)
+
+        logger.info(f"Found {len(results)} Recovery Service Subnets")
+        return results
+
+    except Exception as e:
+        logger.error(f"Error in list_recovery_service_subnets tool: {str(e)}")
+        raise
+
+
+@mcp.tool(description="Get a Recovery Service Subnet by OCID.")
+def get_recovery_service_subnet(
+    recovery_service_subnet_id: Annotated[str, "Recovery Service Subnet OCID"],
+    opc_request_id: Annotated[Optional[str], "Unique identifier for the request"] = None,
+    region: Annotated[
+        Optional[str], "OCI region to execute the request in (e.g., us-ashburn-1)"
+    ] = None,
+) -> RecoveryServiceSubnet:
+    """
+    Retrieves a single Recovery Service Subnet resource from Recovery Service and returns
+    a RecoveryServiceSubnet model mapped from the OCI SDK response.
+    """
+    try:
+        client = get_recovery_client(region)
+
+        kwargs = {}
+        if opc_request_id is not None:
+            kwargs["opc_request_id"] = opc_request_id
+
+        response: oci.response.Response = client.get_recovery_service_subnet(
+            recovery_service_subnet_id=recovery_service_subnet_id, **kwargs
+        )
+
+        data = response.data
+        rss = map_recovery_service_subnet(data)
+        logger.info(f"Fetched Recovery Service Subnet {recovery_service_subnet_id}")
+        return rss
+
+    except Exception as e:
+        logger.error(f"Error in get_recovery_service_subnet tool: {str(e)}")
+        raise
+
+
+@mcp.tool(description="Fetch aggregated Monitoring datapoints for an OCI Recovery Service metric.")
+def get_recovery_service_metrics(
+    compartment_id: str,
+    start_time: str,
+    end_time: str,
+    metricName: Annotated[
+        str,
+        "The metric to fetch. Currently we only support: "
+        "SpaceUsedForRecoveryWindow, ProtectedDatabaseSize, ProtectedDatabaseHealth, "
+        "DataLossExposure",
+    ] = "SpaceUsedForRecoveryWindow",
+    resolution: Annotated[
+        str,
+        "The granularity of the metric. Currently we only support: 1m, 5m, 1h, 1d. Default: 1m.",
+    ] = "1m",
+    aggregation: Annotated[
+        str,
+        "The aggregation for the metric. Currently we only support: "
+        "mean, sum, max, min, count. Default: mean",
+    ] = "mean",
+    protected_database_id: Annotated[
+        Optional[str],
+        "Optional protected database OCID to filter by (maps to the resourceId dimension)",
+    ] = None,
+) -> list[dict]:
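+    # The Monitoring query below is assembled as plain MQL from the arguments.
+    # For illustration only (the OCID is a made-up placeholder), calling with
+    # metricName="SpaceUsedForRecoveryWindow", resolution="1h", aggregation="mean"
+    # and a protected_database_id yields a query like:
+    #   SpaceUsedForRecoveryWindow[1h]{resourceId="ocid1.recoveryprotecteddatabase.oc1..example"}.mean()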
+    monitoring_client = get_monitoring_client()
+    namespace = "oci_recovery_service"
+    filter_clause = f'{{resourceId="{protected_database_id}"}}' if protected_database_id else ""
+    query = f"{metricName}[{resolution}]{filter_clause}.{aggregation}()"
+
+    series_list = monitoring_client.summarize_metrics_data(
+        compartment_id=compartment_id,
+        summarize_metrics_data_details=SummarizeMetricsDataDetails(
+            namespace=namespace,
+            query=query,
+            start_time=start_time,
+            end_time=end_time,
+            resolution=resolution,
+        ),
+    ).data
+
+    result: list[dict] = []
+    for series in series_list:
+        dims = getattr(series, "dimensions", None)
+        points = []
+        for p in getattr(series, "aggregated_datapoints", []):
+            points.append(
+                {
+                    "timestamp": getattr(p, "timestamp", None),
+                    "value": getattr(p, "value", None),
+                }
+            )
+        result.append(
+            {
+                "dimensions": dims,
+                "datapoints": points,
+            }
+        )
+    return result
+
+
+def main():
+    """Run the MCP server over HTTP when host/port are configured, otherwise over stdio."""
+    host = os.getenv("ORACLE_MCP_HOST")
+    port = os.getenv("ORACLE_MCP_PORT")
+
+    if host and port:
+        mcp.run(transport="http", host=host, port=int(port))
+    else:
+        mcp.run()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/src/oci-recovery-mcp-server/pyproject.toml b/src/oci-recovery-mcp-server/pyproject.toml
new file mode 100644
index 00000000..9a2a5bf7
--- /dev/null
+++ b/src/oci-recovery-mcp-server/pyproject.toml
@@ -0,0 +1,41 @@
+[project]
+name = "oracle.oci-recovery-mcp-server"
+version = "1.0.0"
+description = "OCI Recovery Service MCP server"
+readme = "README.md"
+requires-python = ">=3.13"
+license = "UPL-1.0"
+license-files = ["LICENSE.txt"]
+authors = [
+    {name = "Oracle MCP", email = "237432095+oracle-mcp@users.noreply.github.com"},
+]
+dependencies = [
+    "fastmcp==2.13.0",
+    "oci==2.160.0",
+    "pydantic==2.12.3",
+    "mcp>=1.0.0",
+]
+
+classifiers = [
+    "License :: OSI Approved :: Universal Permissive License (UPL)",
+    "Operating System :: OS Independent",
+    "Programming Language :: Python",
+    "Programming Language :: Python :: 3.13",
+]
+
+[project.scripts]
+"oracle.oci-recovery-mcp-server" = "oracle.oci_recovery_mcp_server.server:main"
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[tool.hatch.build.targets.wheel]
+packages = ["oracle"]
+
+[dependency-groups]
+dev = [
+    "pytest>=8.4.2",
+    "pytest-asyncio>=1.2.0",
+    "pytest-cov>=7.0.0",
+]

From e7d933be721675ed4a14192198efdfe7ad37f0e0 Mon Sep 17 00:00:00 2001
From: hagavisi
Date: Sat, 27 Dec 2025 18:15:32 +0530
Subject: [PATCH 02/11] Fix lint issues and add a few more custom tools

---
 .../oracle/__init__.py                        |    1 -
 .../oracle/oci_recovery_mcp_server/models.py  | 1088 ++++++++++++--
 .../oracle/oci_recovery_mcp_server/server.py  | 1311 +++++++++++++++--
 3 files changed, 2168 insertions(+), 232 deletions(-)

diff --git a/src/oci-recovery-mcp-server/oracle/__init__.py b/src/oci-recovery-mcp-server/oracle/__init__.py
index e5a3af0b..d9dff098 100644
--- a/src/oci-recovery-mcp-server/oracle/__init__.py
+++ b/src/oci-recovery-mcp-server/oracle/__init__.py
@@ -3,4 +3,3 @@
 Licensed under the Universal Permissive License v1.0 as shown at
 https://oss.oracle.com/licenses/upl.
""" - diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py index 5831e97b..b6e07bed 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py @@ -5,11 +5,14 @@ """ from datetime import datetime -from typing import Any, Dict, Literal, Optional, List +from typing import Any, Dict, List, Literal, Optional import oci from pydantic import BaseModel, Field +# type aliases to keep line lengths within flake8 limits +HealthStatus = Literal["PROTECTED", "WARNING", "ALERT", "UNKNOWN_ENUM_VALUE"] + def _oci_to_dict(obj): """Best-effort conversion of OCI SDK model objects to plain dicts.""" @@ -59,23 +62,40 @@ class ProtectedDatabaseHealthCounts(OCIBaseModel): """ Aggregated counts of Protected Database health in a compartment/region scope. """ + compartment_id: Optional[str] = Field( - None, alias="compartmentId", description="The OCID of the compartment summarized." + None, + alias="compartmentId", + description="The OCID of the compartment summarized.", ) region: Optional[str] = Field( - None, alias="region", description="The OCI region used for the query (if specified)." + None, + alias="region", + description="The OCI region used for the query (if specified).", ) protected: int = Field( - 0, alias="protected", description="Number of Protected Databases with health=PROTECTED." + 0, + alias="protected", + description="Number of Protected Databases with health=PROTECTED.", ) warning: int = Field( - 0, alias="warning", description="Number of Protected Databases with health=WARNING." + 0, + alias="warning", + description="Number of Protected Databases with health=WARNING.", ) alert: int = Field( 0, alias="alert", description="Number of Protected Databases with health=ALERT." ) + unknown: int = Field( + 0, + alias="unknown", + description=( + "Number of Protected Databases with unknown or missing health " + "(e.g., DELETED or transitional)." + ), + ) total: int = Field( - 0, alias="total", description="Total counted (protected + warning + alert)." + 0, alias="total", description="Total Protected Databases scanned." ) @@ -83,17 +103,26 @@ class ProtectedDatabaseRedoCounts(OCIBaseModel): """ Aggregated counts of redo transport enablement for Protected Databases in a compartment/region scope. """ + compartment_id: Optional[str] = Field( - None, alias="compartmentId", description="The OCID of the compartment summarized." + None, + alias="compartmentId", + description="The OCID of the compartment summarized.", ) region: Optional[str] = Field( - None, alias="region", description="The OCI region used for the query (if specified)." + None, + alias="region", + description="The OCI region used for the query (if specified).", ) enabled: int = Field( - 0, alias="enabled", description="Count of Protected Databases with is_redo_logs_enabled = True." + 0, + alias="enabled", + description="Count of Protected Databases with is_redo_logs_enabled = True.", ) disabled: int = Field( - 0, alias="disabled", description="Count of Protected Databases with is_redo_logs_enabled = False." + 0, + alias="disabled", + description="Count of Protected Databases with is_redo_logs_enabled = False.", ) total: int = Field( 0, alias="total", description="Total counted (enabled + disabled)." 
@@ -102,22 +131,13 @@ class ProtectedDatabaseRedoCounts(OCIBaseModel): class ProtectedDatabaseBackupSpaceSum(OCIBaseModel): """ - Sum of backup space used (GBs) across Protected Databases in a compartment/region scope. + Simplified summary of backup space used across Protected Databases. """ - compartment_id: Optional[str] = Field( - None, alias="compartmentId", description="The OCID of the compartment summarized." - ) - region: Optional[str] = Field( - None, alias="region", description="The OCI region used for the query (if specified)." - ) - total_databases_scanned: int = Field( - 0, alias="totalDatabasesScanned", description="Number of Protected Databases scanned." - ) - sum_backup_space_used_in_gbs: float = Field( - 0.0, - alias="sumBackupSpaceUsedInGBs", - description="Sum of metrics.backup_space_used_in_gbs across all scanned Protected Databases.", - ) + + compartment_id: Optional[str] = Field(None, alias="compartmentId") + region: Optional[str] = Field(None, alias="region") + total_databases_scanned: int = Field(0, alias="totalDatabasesScanned") + sum_backup_space_used_in_gbs: float = Field(0.0, alias="sumBackupSpaceUsedInGBs") # region ProtectedDatabase and nested types (oci.recovery.models) @@ -158,7 +178,8 @@ class ProtectedDatabase(OCIBaseModel): id: Optional[str] = Field(None, description="The OCID of the Protected Database.") compartment_id: Optional[str] = Field( - None, description="The OCID of the compartment containing this Protected Database." + None, + description="The OCID of the compartment containing this Protected Database.", ) display_name: Optional[str] = Field( None, description="A user-friendly name for the Protected Database." @@ -168,8 +189,16 @@ class ProtectedDatabase(OCIBaseModel): protection_policy_id: Optional[str] = Field( None, description="The OCID of the attached Protection Policy." ) - recovery_service_subnet_id: Optional[str] = Field( - None, description="The OCID of the Recovery Service Subnet associated with this database." + policy_locked_date_time: Optional[str] = Field( + None, + description=( + "When the protection policy retention lock is scheduled to take effect " + "(RFC3339 string)." + ), + ) + recovery_service_subnets: Optional[List["RecoveryServiceSubnetDetails"]] = Field( + None, + description="List of Recovery Service Subnet resources associated with this protected database.", ) # DB identification (may not always be present for all database types) @@ -180,40 +209,78 @@ class ProtectedDatabase(OCIBaseModel): None, description="The DB_UNIQUE_NAME of the protected database, if available." ) vpc_user_name: Optional[str] = Field( - None, description="The VPC user name associated with the protected database, if available." + None, + description="The VPC user name associated with the protected database, if available.", ) database_size: Optional[ Literal["XS", "S", "M", "L", "XL", "XXL", "AUTO", "UNKNOWN_ENUM_VALUE"] ] = Field( - None, description="Configured database size category for the protected database." + None, + description="Configured database size category for the protected database.", ) - db_name: Optional[str] = Field( - None, description="The database name, if available." 
+ database_size_in_gbs: Optional[int] = Field( + None, + description="The size of the database in gigabytes, if reported by the service.", + ) + change_rate: Optional[float] = Field( + None, + description="Percentage of data change between successive incremental backups.", + ) + compression_ratio: Optional[float] = Field( + None, description="Compression ratio (compressed size to expanded size)." ) # Status and health lifecycle_state: Optional[ - Literal["CREATING", "ACTIVE", "UPDATING", "DELETE_SCHEDULED", "DELETING", "DELETED", "FAILED"] - ] = Field(None, description="The current lifecycle state of the Protected Database.") + Literal[ + "CREATING", + "ACTIVE", + "UPDATING", + "DELETE_SCHEDULED", + "DELETING", + "DELETED", + "FAILED", + "UNKNOWN_ENUM_VALUE", + ] + ] = Field( + None, description="The current lifecycle state of the Protected Database." + ) lifecycle_details: Optional[str] = Field( None, description="Additional details about the current lifecycle state." ) - health: Optional[ - Literal["PROTECTED", "WARNING", "ALERT"] - ] = Field( + health_details: Optional[str] = Field( + None, + description="A message describing the current health of the protected database.", + ) + is_read_only_resource: Optional[bool] = Field( + None, + description=( + "Indicates whether the protected database is created by Recovery Service " + "(TRUE) or manually (FALSE)." + ), + ) + health: Optional[HealthStatus] = Field( None, description="Service-evaluated health status: PROTECTED, WARNING, or ALERT.", ) # Redo transport (for zero data loss RPO) - is_redo_logs_enabled: Optional[bool] = Field( - None, description="Whether redo transport is enabled for this Protected Database." + is_redo_logs_shipped: Optional[bool] = Field( + None, + description=( + "Whether real-time redo shipping to Recovery Service is enabled " + "(SDK: is_redo_logs_shipped)." + ), ) # Metrics - metrics: Optional[ProtectedDatabaseMetrics] = Field( + metrics: Optional["Metrics"] = Field( None, description="Metrics associated with this Protected Database." 
) + subscription_id: Optional[str] = Field( + None, + description="The OCID of the cloud service subscription linked to the protected database.", + ) # Timestamps time_created: Optional[datetime] = Field( @@ -280,31 +347,73 @@ def map_protected_database( return ProtectedDatabase( id=getattr(pd, "id", None) or data.get("id"), - compartment_id=getattr(pd, "compartment_id", None) or data.get("compartment_id"), - display_name=getattr(pd, "display_name", None) or data.get("display_name"), + compartment_id=getattr(pd, "compartment_id", None) + or data.get("compartment_id") + or data.get("compartmentId"), + display_name=getattr(pd, "display_name", None) + or data.get("display_name") + or data.get("displayName"), protection_policy_id=getattr(pd, "protection_policy_id", None) or data.get("protection_policy_id") or data.get("protectionPolicyId"), - recovery_service_subnet_id=getattr(pd, "recovery_service_subnet_id", None) - or data.get("recovery_service_subnet_id") - or data.get("recoveryServiceSubnetId"), - database_id=getattr(pd, "database_id", None) or data.get("database_id"), - db_unique_name=getattr(pd, "db_unique_name", None) or data.get("db_unique_name"), - db_name=getattr(pd, "db_name", None) or data.get("db_name"), - lifecycle_state=getattr(pd, "lifecycle_state", None) or data.get("lifecycle_state"), - lifecycle_details=getattr(pd, "lifecycle_details", None) or data.get("lifecycle_details"), - health=getattr(pd, "health", None) or data.get("health"), - is_redo_logs_enabled=getattr(pd, "is_redo_logs_enabled", None) - or data.get("is_redo_logs_enabled") - or data.get("isRedoLogsEnabled"), - metrics=map_protected_database_metrics( - getattr(pd, "metrics", None) or data.get("metrics") + policy_locked_date_time=getattr(pd, "policy_locked_date_time", None) + or data.get("policy_locked_date_time") + or data.get("policyLockedDateTime"), + recovery_service_subnets=_map_list( + getattr(pd, "recovery_service_subnets", None) + or data.get("recovery_service_subnets") + or data.get("recoveryServiceSubnets"), + map_recovery_service_subnet_details, ), - time_created=getattr(pd, "time_created", None) or data.get("time_created"), - time_updated=getattr(pd, "time_updated", None) or data.get("time_updated"), - freeform_tags=getattr(pd, "freeform_tags", None) or data.get("freeform_tags"), - defined_tags=getattr(pd, "defined_tags", None) or data.get("defined_tags"), - system_tags=getattr(pd, "system_tags", None) or data.get("system_tags"), + database_id=getattr(pd, "database_id", None) + or data.get("database_id") + or data.get("databaseId"), + database_size_in_gbs=getattr(pd, "database_size_in_gbs", None) + or data.get("database_size_in_gbs") + or data.get("databaseSizeInGBs") + or data.get("databaseSizeInGbs"), + change_rate=getattr(pd, "change_rate", None) + or data.get("change_rate") + or data.get("changeRate"), + compression_ratio=getattr(pd, "compression_ratio", None) + or data.get("compression_ratio") + or data.get("compressionRatio"), + db_unique_name=getattr(pd, "db_unique_name", None) + or data.get("db_unique_name"), + lifecycle_state=getattr(pd, "lifecycle_state", None) + or data.get("lifecycle_state"), + lifecycle_details=getattr(pd, "lifecycle_details", None) + or data.get("lifecycle_details") + or data.get("lifecycleDetails"), + health_details=getattr(pd, "health_details", None) + or data.get("health_details") + or data.get("healthDetails"), + is_read_only_resource=getattr(pd, "is_read_only_resource", None) + or data.get("is_read_only_resource") + or data.get("isReadOnlyResource"), + 
health=getattr(pd, "health", None) or data.get("health"), + is_redo_logs_shipped=getattr(pd, "is_redo_logs_shipped", None) + or data.get("is_redo_logs_shipped") + or data.get("isRedoLogsShipped"), + metrics=map_metrics(getattr(pd, "metrics", None) or data.get("metrics")), + subscription_id=getattr(pd, "subscription_id", None) + or data.get("subscription_id") + or data.get("subscriptionId"), + time_created=getattr(pd, "time_created", None) + or data.get("time_created") + or data.get("timeCreated"), + time_updated=getattr(pd, "time_updated", None) + or data.get("time_updated") + or data.get("timeUpdated"), + freeform_tags=getattr(pd, "freeform_tags", None) + or data.get("freeform_tags") + or data.get("freeformTags"), + defined_tags=getattr(pd, "defined_tags", None) + or data.get("defined_tags") + or data.get("definedTags"), + system_tags=getattr(pd, "system_tags", None) + or data.get("system_tags") + or data.get("systemTags"), ) @@ -331,7 +440,15 @@ class RecoveryServiceSubnet(OCIBaseModel): None, description="List of Network Security Group OCIDs attached to the RSS." ) lifecycle_state: Optional[ - Literal["CREATING", "ACTIVE", "UPDATING", "DELETE_SCHEDULED", "DELETING", "DELETED", "FAILED"] + Literal[ + "CREATING", + "ACTIVE", + "UPDATING", + "DELETE_SCHEDULED", + "DELETING", + "DELETED", + "FAILED", + ] ] = Field(None, description="The current lifecycle state of the RSS.") lifecycle_details: Optional[str] = Field( None, description="Additional details about the RSS lifecycle." @@ -455,7 +572,8 @@ class ProtectedDatabaseSummary(OCIBaseModel): id: Optional[str] = Field(None, description="The OCID of the Protected Database.") compartment_id: Optional[str] = Field( - None, description="The OCID of the compartment containing the Protected Database." + None, + description="The OCID of the compartment containing the Protected Database.", ) display_name: Optional[str] = Field( None, description="A user-friendly name for the Protected Database." @@ -463,14 +581,16 @@ class ProtectedDatabaseSummary(OCIBaseModel): protection_policy_id: Optional[str] = Field( None, description="The OCID of the attached Protection Policy." ) - recovery_service_subnet_id: Optional[str] = Field( - None, description="The OCID of the Recovery Service Subnet associated with this database." - ) policy_locked_date_time: Optional[str] = Field( - None, description="Timestamp when the protection policy was locked (RFC3339 string)." + None, + description="Timestamp when the protection policy was locked (RFC3339 string).", ) recovery_service_subnets: Optional[List["RecoveryServiceSubnetDetails"]] = Field( - None, description="List of Recovery Service Subnet details associated with this protected database." + None, + description=( + "List of Recovery Service Subnet details associated with this " + "protected database." + ), ) database_id: Optional[str] = Field( None, description="The OCID of the backing database, where applicable." @@ -479,37 +599,47 @@ class ProtectedDatabaseSummary(OCIBaseModel): None, description="The DB_UNIQUE_NAME of the protected database, if available." ) vpc_user_name: Optional[str] = Field( - None, description="The VPC user name associated with the protected database, if available." + None, + description="The VPC user name associated with the protected database, if available.", ) database_size: Optional[ Literal["XS", "S", "M", "L", "XL", "XXL", "AUTO", "UNKNOWN_ENUM_VALUE"] ] = Field( - None, description="Configured database size category." 
+ None, + description="Configured database size category for the protected database.", ) - db_name: Optional[str] = Field(None, description="The database name, if available.") lifecycle_state: Optional[ - Literal["CREATING", "ACTIVE", "UPDATING", "DELETE_SCHEDULED", "DELETING", "DELETED", "FAILED"] + Literal[ + "CREATING", + "ACTIVE", + "UPDATING", + "DELETE_SCHEDULED", + "DELETING", + "DELETED", + "FAILED", + "UNKNOWN_ENUM_VALUE", + ] ] = Field(None, description="The current lifecycle state.") - health: Optional[ - Literal["PROTECTED", "WARNING", "ALERT"] - ] = Field(None, description="Health status.") + health: Optional[HealthStatus] = Field(None, description="Health status.") lifecycle_details: Optional[str] = Field( - None, description="Detailed description about the current lifecycle state of the protected database." + None, + description="Detailed description about the current lifecycle state of the protected database.", ) health_details: Optional[str] = Field( - None, description="A message describing the current health of the protected database." + None, + description="A message describing the current health of the protected database.", ) is_read_only_resource: Optional[bool] = Field( - None, description="Indicates whether the protected database is created by the service (TRUE) or manually (FALSE)." + None, + description="Indicates whether the protected database is created by the service (TRUE) " + "or manually (FALSE).", ) metrics: Optional["MetricsSummary"] = Field( None, description="Metrics summary associated with this protected database." ) subscription_id: Optional[str] = Field( - None, description="The OCID of the cloud service subscription linked to the protected database." - ) - is_redo_logs_enabled: Optional[bool] = Field( - None, description="Whether redo transport is enabled." + None, + description="The OCID of the cloud service subscription linked to the protected database.", ) time_created: Optional[datetime] = Field( None, description="The time the Protected Database was created (RFC3339)." @@ -517,9 +647,7 @@ class ProtectedDatabaseSummary(OCIBaseModel): time_updated: Optional[datetime] = Field( None, description="The time the Protected Database was last updated (RFC3339)." ) - freeform_tags: Optional[Dict[str, str]] = Field( - None, description="Free-form tags." - ) + freeform_tags: Optional[Dict[str, str]] = Field(None, description="Free-form tags.") defined_tags: Optional[Dict[str, Dict[str, Any]]] = Field( None, description="Defined tags." 
) @@ -534,7 +662,11 @@ def map_protected_database_summary( if pds is None: return None data = _oci_to_dict(pds) or {} - rss_in = getattr(pds, "recovery_service_subnets", None) or data.get("recovery_service_subnets") or data.get("recoveryServiceSubnets") + rss_in = ( + getattr(pds, "recovery_service_subnets", None) + or data.get("recovery_service_subnets") + or data.get("recoveryServiceSubnets") + ) return ProtectedDatabaseSummary( id=getattr(pds, "id", None) or data.get("id"), compartment_id=getattr(pds, "compartment_id", None) @@ -550,9 +682,6 @@ def map_protected_database_summary( or data.get("policy_locked_date_time") or data.get("policyLockedDateTime"), recovery_service_subnets=_map_list(rss_in, map_recovery_service_subnet_details), - recovery_service_subnet_id=getattr(pds, "recovery_service_subnet_id", None) - or data.get("recovery_service_subnet_id") - or data.get("recoveryServiceSubnetId"), database_id=getattr(pds, "database_id", None) or data.get("database_id") or data.get("databaseId"), @@ -565,7 +694,6 @@ def map_protected_database_summary( database_size=getattr(pds, "database_size", None) or data.get("database_size") or data.get("databaseSize"), - db_name=getattr(pds, "db_name", None) or data.get("db_name"), lifecycle_state=getattr(pds, "lifecycle_state", None) or data.get("lifecycle_state") or data.get("lifecycleState"), @@ -579,10 +707,9 @@ def map_protected_database_summary( is_read_only_resource=getattr(pds, "is_read_only_resource", None) or data.get("is_read_only_resource") or data.get("isReadOnlyResource"), - is_redo_logs_enabled=getattr(pds, "is_redo_logs_enabled", None) - or data.get("is_redo_logs_enabled") - or data.get("isRedoLogsEnabled"), - metrics=map_metrics_summary(getattr(pds, "metrics", None) or data.get("metrics")), + metrics=map_metrics_summary( + getattr(pds, "metrics", None) or data.get("metrics") + ), subscription_id=getattr(pds, "subscription_id", None) or data.get("subscription_id") or data.get("subscriptionId"), @@ -909,9 +1036,7 @@ def map_metrics(m) -> Metrics | None: database_size_in_gbs=getattr(m, "database_size_in_gbs", None) or data.get("database_size_in_gbs") or data.get("databaseSizeInGbs"), - recoverable_window_start_time=getattr( - m, "recoverable_window_start_time", None - ) + recoverable_window_start_time=getattr(m, "recoverable_window_start_time", None) or data.get("recoverable_window_start_time") or data.get("recoverableWindowStartTime"), recoverable_window_end_time=getattr(m, "recoverable_window_end_time", None) @@ -963,9 +1088,7 @@ def map_metrics_summary(ms) -> MetricsSummary | None: database_size_in_gbs=getattr(ms, "database_size_in_gbs", None) or data.get("database_size_in_gbs") or data.get("databaseSizeInGbs"), - recoverable_window_start_time=getattr( - ms, "recoverable_window_start_time", None - ) + recoverable_window_start_time=getattr(ms, "recoverable_window_start_time", None) or data.get("recoverable_window_start_time") or data.get("recoverableWindowStartTime"), recoverable_window_end_time=getattr(ms, "recoverable_window_end_time", None) @@ -988,17 +1111,17 @@ class ProtectionPolicy(OCIBaseModel): Named ProtectionPolicy here as requested. """ - id: Optional[str] = Field( - None, description="The OCID of the protection policy." - ) + id: Optional[str] = Field(None, description="The OCID of the protection policy.") display_name: Optional[str] = Field( None, description="A user-friendly name for the protection policy." 
) compartment_id: Optional[str] = Field( - None, description="The OCID of the compartment containing the protection policy." + None, + description="The OCID of the compartment containing the protection policy.", ) backup_retention_period_in_days: Optional[int] = Field( - None, description="Exact number of days to retain backups created by Recovery Service." + None, + description="Exact number of days to retain backups created by Recovery Service.", ) is_predefined_policy: Optional[bool] = Field( None, description="Whether this is an Oracle-defined predefined policy." @@ -1007,7 +1130,8 @@ class ProtectionPolicy(OCIBaseModel): None, description="When the protection policy was locked (RFC3339 string)." ) must_enforce_cloud_locality: Optional[bool] = Field( - None, description="Whether backup storage must stay in the same cloud locality as the database." + None, + description="Whether backup storage must stay in the same cloud locality as the database.", ) time_created: Optional[datetime] = Field( None, description="The time the protection policy was created (RFC3339)." @@ -1072,9 +1196,7 @@ def map_protection_policy( policy_locked_date_time=getattr(pp, "policy_locked_date_time", None) or data.get("policy_locked_date_time") or data.get("policyLockedDateTime"), - must_enforce_cloud_locality=getattr( - pp, "must_enforce_cloud_locality", None - ) + must_enforce_cloud_locality=getattr(pp, "must_enforce_cloud_locality", None) or data.get("must_enforce_cloud_locality") or data.get("mustEnforceCloudLocality"), time_created=getattr(pp, "time_created", None) @@ -1109,17 +1231,17 @@ class ProtectionPolicySummary(OCIBaseModel): Pydantic model mirroring oci.recovery.models.ProtectionPolicySummary. """ - id: Optional[str] = Field( - None, description="The OCID of the protection policy." - ) + id: Optional[str] = Field(None, description="The OCID of the protection policy.") display_name: Optional[str] = Field( None, description="A user-friendly name for the protection policy." ) compartment_id: Optional[str] = Field( - None, description="The OCID of the compartment containing the protection policy." + None, + description="The OCID of the compartment containing the protection policy.", ) backup_retention_period_in_days: Optional[int] = Field( - None, description="Exact number of days to retain backups created by Recovery Service." + None, + description="Exact number of days to retain backups created by Recovery Service.", ) is_predefined_policy: Optional[bool] = Field( None, description="Whether this is an Oracle-defined predefined policy." @@ -1128,7 +1250,8 @@ class ProtectionPolicySummary(OCIBaseModel): None, description="When the protection policy was locked (RFC3339 string)." ) must_enforce_cloud_locality: Optional[bool] = Field( - None, description="Whether backup storage must stay in the same cloud locality as the database." + None, + description="Whether backup storage must stay in the same cloud locality as the database.", ) time_created: Optional[datetime] = Field( None, description="The time the protection policy was created (RFC3339)." 
@@ -1193,9 +1316,7 @@ def map_protection_policy_summary( policy_locked_date_time=getattr(pps, "policy_locked_date_time", None) or data.get("policy_locked_date_time") or data.get("policyLockedDateTime"), - must_enforce_cloud_locality=getattr( - pps, "must_enforce_cloud_locality", None - ) + must_enforce_cloud_locality=getattr(pps, "must_enforce_cloud_locality", None) or data.get("must_enforce_cloud_locality") or data.get("mustEnforceCloudLocality"), time_created=getattr(pps, "time_created", None) @@ -1223,3 +1344,734 @@ def map_protection_policy_summary( # endregion + +# region Database Service (oci.database.models) + + +class BackupDestinationDetails(OCIBaseModel): + """ + Pydantic model for backup destination details within DbBackupConfig. + Covers common fields across destination types; unmodeled keys are preserved in 'extras'. + """ + + type: Optional[str] = Field( + None, description="Destination type, e.g., DBRS, OBJECT_STORE, NFS." + ) + destination_type: Optional[str] = Field( + None, description="Original destination type value if provided." + ) + id: Optional[str] = Field( + None, description="Destination OCID/identifier when applicable." + ) + backup_destination_id: Optional[str] = Field( + None, description="Backup destination OCID if provided by SDK." + ) + bucket_name: Optional[str] = Field( + None, description="Object Storage bucket name (OBJECT_STORE only)." + ) + namespace: Optional[str] = Field( + None, description="Object Storage namespace (OBJECT_STORE only)." + ) + region: Optional[str] = Field( + None, description="Region for Object Storage destination." + ) + local_mount_point: Optional[str] = Field( + None, description="Local mount point path (NFS)." + ) + nfs_server: Optional[str] = Field(None, description="NFS server address (NFS).") + path: Optional[str] = Field(None, description="Destination path if provided.") + vault_id: Optional[str] = Field( + None, description="Vault OCID for encryption (if applicable)." + ) + encryption_key_id: Optional[str] = Field( + None, description="KMS key OCID (if applicable)." + ) + compartment_id: Optional[str] = Field( + None, description="Compartment OCID of the destination (if applicable)." + ) + tenancy_id: Optional[str] = Field(None, description="Tenancy OCID (if applicable).") + extras: Optional[Dict[str, Any]] = Field( + None, description="Any provider-specific fields not modeled above." 
+    )
+
+
+def map_backup_destination_details(det) -> BackupDestinationDetails | None:
+    if not det:
+        return None
+    data = _oci_to_dict(det) or {}
+
+    def pick(*names: str):
+        for n in names:
+            v = getattr(det, n, None)
+            if v is not None:
+                return v
+            if data.get(n) is not None:
+                return data.get(n)
+        return None
+
+    type_val = pick("type", "destination_type", "destinationType")
+    id_val = pick("id", "backup_destination_id", "backupDestinationId", "destinationId")
+    bucket = pick("bucket_name", "bucketName")
+    namespace = pick("namespace")
+    region = pick("region")
+    local_mount = pick("local_mount_point", "localMountPoint", "mountPoint")
+    nfs_server = pick("nfs_server", "nfsServer", "nfsServerIp", "nfs_server_ip")
+    path = pick("path")
+    vault_id = pick("vault_id", "vaultId")
+    key_id = pick("encryption_key_id", "encryptionKeyId", "kmsKeyId")
+    compartment_id = pick("compartment_id", "compartmentId")
+    tenancy_id = pick("tenancy_id", "tenancyId")
+
+    consumed = {
+        "type",
+        "destination_type",
+        "destinationType",
+        "id",
+        "backup_destination_id",
+        "backupDestinationId",
+        "destinationId",
+        "bucket_name",
+        "bucketName",
+        "namespace",
+        "region",
+        "local_mount_point",
+        "localMountPoint",
+        "mountPoint",
+        "nfs_server",
+        "nfsServer",
+        "nfsServerIp",
+        "nfs_server_ip",
+        "path",
+        "vault_id",
+        "vaultId",
+        "encryption_key_id",
+        "encryptionKeyId",
+        "kmsKeyId",
+        "compartment_id",
+        "compartmentId",
+        "tenancy_id",
+        "tenancyId",
+    }
+    extras = None
+    try:
+        extras = {k: v for k, v in data.items() if k not in consumed}
+    except Exception:
+        extras = None
+
+    return BackupDestinationDetails(
+        type=type_val,
+        destination_type=pick("destination_type", "destinationType"),
+        id=id_val,
+        backup_destination_id=pick("backup_destination_id", "backupDestinationId"),
+        bucket_name=bucket,
+        namespace=namespace,
+        region=region,
+        local_mount_point=local_mount,
+        nfs_server=nfs_server,
+        path=path,
+        vault_id=vault_id,
+        encryption_key_id=key_id,
+        compartment_id=compartment_id,
+        tenancy_id=tenancy_id,
+        extras=extras,
+    )
+
+
+class DbBackupConfig(OCIBaseModel):
+    """
+    Pydantic model mirroring oci.database.models.DbBackupConfig.
+    Nested under Database/DatabaseSummary as db_backup_config.
+    """
+
+    is_auto_backup_enabled: Optional[bool] = Field(
+        None, description="Whether auto backup is enabled."
+    )
+    auto_backup_window: Optional[str] = Field(
+        None, description="Preferred start time window for auto backups."
+    )
+    recovery_window_in_days: Optional[int] = Field(
+        None, description="Recovery window in days (Recovery Service)."
+    )
+    vpcu_user: Optional[str] = Field(
+        None, description="Virtual Private Catalog user (VPC user) if configured."
+    )
+    backup_deletion_policy: Optional[str] = Field(
+        None, description="Deletion policy for backups."
+    )
+    backup_destination_details: Optional[List[BackupDestinationDetails]] = Field(
+        None, description="Backup destination details."
+    )
+    extras: Optional[Dict[str, Any]] = Field(
+        None, description="Any provider-specific fields not modeled above."
+ ) + + +def map_db_backup_config(cfg) -> DbBackupConfig | None: + if not cfg: + return None + data = _oci_to_dict(cfg) or {} + + def pick(*names: str): + for n in names: + v = getattr(cfg, n, None) + if v is not None: + return v + if data.get(n) is not None: + return data.get(n) + return None + + is_auto = pick( + "is_auto_backup_enabled", + "isAutoBackupEnabled", + "auto_backup_enabled", + "autoBackupEnabled", + ) + window = pick( + "auto_backup_window", + "autoBackupWindow", + "preferred_backup_window", + "preferredBackupWindow", + ) + recovery_days = pick( + "recovery_window_in_days", "recoveryWindowInDays", "recovery_window" + ) + vpcu = pick( + "vpcu_user", + "vpc_user", + "vpcUser", + "vpcUserName", + "vpc_user_name", + "vpcUsername", + ) + deletion_policy = pick("backup_deletion_policy", "backupDeletionPolicy") + dests = ( + pick("backup_destination_details", "backupDestinationDetails") + or data.get("backupDestinationDetails") + or data.get("backup_destination_details") + ) + mapped_dests = _map_list(dests, map_backup_destination_details) + + consumed = { + "is_auto_backup_enabled", + "isAutoBackupEnabled", + "auto_backup_enabled", + "autoBackupEnabled", + "auto_backup_window", + "autoBackupWindow", + "preferred_backup_window", + "preferredBackupWindow", + "recovery_window_in_days", + "recoveryWindowInDays", + "recovery_window", + "vpcu_user", + "vpc_user", + "vpcUser", + "vpcUserName", + "vpc_user_name", + "vpcUsername", + "backup_deletion_policy", + "backupDeletionPolicy", + "backup_destination_details", + "backupDestinationDetails", + } + extras = None + try: + extras = {k: v for k, v in data.items() if k not in consumed} + except Exception: + extras = None + + return DbBackupConfig( + is_auto_backup_enabled=is_auto, + auto_backup_window=window, + recovery_window_in_days=recovery_days, + vpcu_user=vpcu, + backup_deletion_policy=deletion_policy, + backup_destination_details=mapped_dests, + extras=extras, + ) + + +class Database(OCIBaseModel): + """ + Pydantic model mirroring oci.database.models.Database. + """ + + id: Optional[str] = Field(None, description="The OCID of the Database.") + compartment_id: Optional[str] = Field( + None, description="The OCID of the compartment containing the Database." + ) + lifecycle_state: Optional[str] = Field( + None, description="The current lifecycle state of the Database." + ) + db_name: Optional[str] = Field(None, description="The database name.") + db_unique_name: Optional[str] = Field( + None, description="The DB_UNIQUE_NAME of the database." + ) + db_home_id: Optional[str] = Field(None, description="The OCID of the DB Home.") + db_system_id: Optional[str] = Field( + None, description="The OCID of the DB System (if applicable)." + ) + db_backup_config: Optional["DbBackupConfig"] = Field( + None, description="Database backup configuration." + ) + protection_policy_id: Optional[str] = Field( + None, + description="Recovery Service Protection Policy OCID linked via Protected Database, if any.", + ) + time_created: Optional[datetime] = Field( + None, description="Creation time (RFC3339)." 
+ ) + + +def map_database(db) -> Database | None: + if db is None: + return None + data = _oci_to_dict(db) or {} + return Database( + id=getattr(db, "id", None) or data.get("id"), + compartment_id=getattr(db, "compartment_id", None) + or data.get("compartment_id") + or data.get("compartmentId"), + lifecycle_state=getattr(db, "lifecycle_state", None) + or data.get("lifecycle_state") + or data.get("lifecycleState"), + db_name=getattr(db, "db_name", None) + or data.get("db_name") + or data.get("dbName"), + db_unique_name=getattr(db, "db_unique_name", None) + or data.get("db_unique_name") + or data.get("dbUniqueName"), + db_home_id=getattr(db, "db_home_id", None) + or data.get("db_home_id") + or data.get("dbHomeId"), + db_system_id=getattr(db, "db_system_id", None) + or data.get("db_system_id") + or data.get("dbSystemId"), + db_backup_config=map_db_backup_config( + getattr(db, "db_backup_config", None) + or data.get("db_backup_config") + or data.get("dbBackupConfig") + or data.get("databaseBackupConfig") + ), + time_created=getattr(db, "time_created", None) + or data.get("time_created") + or data.get("timeCreated"), + ) + + +class DatabaseSummary(OCIBaseModel): + """ + Pydantic model mirroring oci.database.models.DatabaseSummary. + """ + + id: Optional[str] = Field(None, description="The OCID of the Database.") + compartment_id: Optional[str] = Field( + None, description="The OCID of the compartment containing the Database." + ) + lifecycle_state: Optional[str] = Field( + None, description="The current lifecycle state of the Database." + ) + db_name: Optional[str] = Field(None, description="The database name.") + db_unique_name: Optional[str] = Field( + None, description="The DB_UNIQUE_NAME of the database." + ) + db_home_id: Optional[str] = Field(None, description="The OCID of the DB Home.") + db_system_id: Optional[str] = Field( + None, description="The OCID of the DB System (if applicable)." + ) + db_backup_config: Optional["DbBackupConfig"] = Field( + None, description="Database backup configuration." + ) + protection_policy_id: Optional[str] = Field( + None, + description="Recovery Service Protection Policy OCID linked via Protected Database, if any.", + ) + time_created: Optional[datetime] = Field( + None, description="Creation time (RFC3339)." + ) + + +def map_database_summary(db) -> DatabaseSummary | None: + if db is None: + return None + data = _oci_to_dict(db) or {} + return DatabaseSummary( + id=getattr(db, "id", None) or data.get("id"), + compartment_id=getattr(db, "compartment_id", None) + or data.get("compartment_id") + or data.get("compartmentId"), + lifecycle_state=getattr(db, "lifecycle_state", None) + or data.get("lifecycle_state") + or data.get("lifecycleState"), + db_name=getattr(db, "db_name", None) + or data.get("db_name") + or data.get("dbName"), + db_unique_name=getattr(db, "db_unique_name", None) + or data.get("db_unique_name") + or data.get("dbUniqueName"), + db_home_id=getattr(db, "db_home_id", None) + or data.get("db_home_id") + or data.get("dbHomeId"), + db_system_id=getattr(db, "db_system_id", None) + or data.get("db_system_id") + or data.get("dbSystemId"), + db_backup_config=map_db_backup_config( + getattr(db, "db_backup_config", None) + or data.get("db_backup_config") + or data.get("dbBackupConfig") + or data.get("databaseBackupConfig") + ), + time_created=getattr(db, "time_created", None) + or data.get("time_created") + or data.get("timeCreated"), + ) + + +class BackupSummary(OCIBaseModel): + """ + Pydantic model mirroring oci.database.models.BackupSummary. 
+ """ + + id: Optional[str] = Field(None, description="The OCID of the backup.") + display_name: Optional[str] = Field(None, description="Display name.") + compartment_id: Optional[str] = Field(None, description="Compartment OCID.") + database_id: Optional[str] = Field(None, description="Database OCID.") + lifecycle_state: Optional[str] = Field(None, description="Lifecycle state.") + type: Optional[str] = Field(None, description="Backup type.") + time_started: Optional[datetime] = Field(None, description="Start time (RFC3339).") + time_ended: Optional[datetime] = Field(None, description="End time (RFC3339).") + time_created: Optional[datetime] = Field( + None, description="Creation time (RFC3339)." + ) + size_in_gbs: Optional[float] = Field(None, description="Backup size in GBs.") + + +def map_backup_summary(b) -> BackupSummary | None: + if b is None: + return None + data = _oci_to_dict(b) or {} + return BackupSummary( + id=getattr(b, "id", None) or data.get("id"), + display_name=getattr(b, "display_name", None) + or data.get("display_name") + or data.get("displayName"), + compartment_id=getattr(b, "compartment_id", None) + or data.get("compartment_id") + or data.get("compartmentId"), + database_id=getattr(b, "database_id", None) + or data.get("database_id") + or data.get("databaseId"), + lifecycle_state=getattr(b, "lifecycle_state", None) + or data.get("lifecycle_state") + or data.get("lifecycleState"), + type=getattr(b, "type", None) or data.get("type"), + time_started=getattr(b, "time_started", None) + or data.get("time_started") + or data.get("timeStarted"), + time_ended=getattr(b, "time_ended", None) + or data.get("time_ended") + or data.get("timeEnded"), + time_created=getattr(b, "time_created", None) + or data.get("time_created") + or data.get("timeCreated"), + size_in_gbs=getattr(b, "size_in_gbs", None) + or data.get("size_in_gbs") + or data.get("sizeInGBs") + or data.get("sizeInGbs"), + ) + + +class Backup(OCIBaseModel): + """ + Pydantic model mirroring oci.database.models.Backup. + """ + + id: Optional[str] = Field(None, description="The OCID of the backup.") + display_name: Optional[str] = Field(None, description="Display name.") + compartment_id: Optional[str] = Field(None, description="Compartment OCID.") + database_id: Optional[str] = Field(None, description="Database OCID.") + lifecycle_state: Optional[str] = Field(None, description="Lifecycle state.") + type: Optional[str] = Field(None, description="Backup type.") + time_started: Optional[datetime] = Field(None, description="Start time (RFC3339).") + time_ended: Optional[datetime] = Field(None, description="End time (RFC3339).") + time_created: Optional[datetime] = Field( + None, description="Creation time (RFC3339)." + ) + size_in_gbs: Optional[float] = Field(None, description="Backup size in GBs.") + database_version: Optional[str] = Field( + None, description="Database version at backup time." 
+ ) + + +def map_backup(b) -> Backup | None: + if b is None: + return None + data = _oci_to_dict(b) or {} + return Backup( + id=getattr(b, "id", None) or data.get("id"), + display_name=getattr(b, "display_name", None) + or data.get("display_name") + or data.get("displayName"), + compartment_id=getattr(b, "compartment_id", None) + or data.get("compartment_id") + or data.get("compartmentId"), + database_id=getattr(b, "database_id", None) + or data.get("database_id") + or data.get("databaseId"), + lifecycle_state=getattr(b, "lifecycle_state", None) + or data.get("lifecycle_state") + or data.get("lifecycleState"), + type=getattr(b, "type", None) or data.get("type"), + time_started=getattr(b, "time_started", None) + or data.get("time_started") + or data.get("timeStarted"), + time_ended=getattr(b, "time_ended", None) + or data.get("time_ended") + or data.get("timeEnded"), + time_created=getattr(b, "time_created", None) + or data.get("time_created") + or data.get("timeCreated"), + size_in_gbs=getattr(b, "size_in_gbs", None) + or data.get("size_in_gbs") + or data.get("sizeInGBs") + or data.get("sizeInGbs"), + database_version=getattr(b, "database_version", None) + or data.get("database_version") + or data.get("databaseVersion"), + ) + + +class DatabaseHomeSummary(OCIBaseModel): + """ + Pydantic model mirroring oci.database.models.DbHomeSummary. + """ + + id: Optional[str] = Field(None, description="The OCID of the DB Home.") + display_name: Optional[str] = Field(None, description="Display name.") + compartment_id: Optional[str] = Field(None, description="Compartment OCID.") + db_system_id: Optional[str] = Field(None, description="DB System OCID.") + lifecycle_state: Optional[str] = Field(None, description="Lifecycle state.") + db_version: Optional[str] = Field(None, description="DB version.") + time_created: Optional[datetime] = Field( + None, description="Creation time (RFC3339)." + ) + + +def map_database_home_summary(h) -> DatabaseHomeSummary | None: + if h is None: + return None + data = _oci_to_dict(h) or {} + return DatabaseHomeSummary( + id=getattr(h, "id", None) or data.get("id"), + display_name=getattr(h, "display_name", None) + or data.get("display_name") + or data.get("displayName"), + compartment_id=getattr(h, "compartment_id", None) + or data.get("compartment_id") + or data.get("compartmentId"), + db_system_id=getattr(h, "db_system_id", None) + or data.get("db_system_id") + or data.get("dbSystemId"), + lifecycle_state=getattr(h, "lifecycle_state", None) + or data.get("lifecycle_state") + or data.get("lifecycleState"), + db_version=getattr(h, "db_version", None) + or data.get("db_version") + or data.get("dbVersion"), + time_created=getattr(h, "time_created", None) + or data.get("time_created") + or data.get("timeCreated"), + ) + + +class DatabaseHome(OCIBaseModel): + """ + Pydantic model mirroring oci.database.models.DbHome. + """ + + id: Optional[str] = Field(None, description="The OCID of the DB Home.") + display_name: Optional[str] = Field(None, description="Display name.") + compartment_id: Optional[str] = Field(None, description="Compartment OCID.") + db_system_id: Optional[str] = Field(None, description="DB System OCID.") + lifecycle_state: Optional[str] = Field(None, description="Lifecycle state.") + db_version: Optional[str] = Field(None, description="DB version.") + time_created: Optional[datetime] = Field( + None, description="Creation time (RFC3339)." 
+ ) + + +def map_database_home(h) -> DatabaseHome | None: + if h is None: + return None + data = _oci_to_dict(h) or {} + return DatabaseHome( + id=getattr(h, "id", None) or data.get("id"), + display_name=getattr(h, "display_name", None) + or data.get("display_name") + or data.get("displayName"), + compartment_id=getattr(h, "compartment_id", None) + or data.get("compartment_id") + or data.get("compartmentId"), + db_system_id=getattr(h, "db_system_id", None) + or data.get("db_system_id") + or data.get("dbSystemId"), + lifecycle_state=getattr(h, "lifecycle_state", None) + or data.get("lifecycle_state") + or data.get("lifecycleState"), + db_version=getattr(h, "db_version", None) + or data.get("db_version") + or data.get("dbVersion"), + time_created=getattr(h, "time_created", None) + or data.get("time_created") + or data.get("timeCreated"), + ) + + +class DbSystemSummary(OCIBaseModel): + """ + Pydantic model mirroring oci.database.models.DbSystemSummary. + """ + + id: Optional[str] = Field(None, description="DB System OCID.") + display_name: Optional[str] = Field(None, description="Display name.") + compartment_id: Optional[str] = Field(None, description="Compartment OCID.") + lifecycle_state: Optional[str] = Field(None, description="Lifecycle state.") + shape: Optional[str] = Field(None, description="Shape.") + cpu_core_count: Optional[int] = Field(None, description="CPU core count.") + node_count: Optional[int] = Field(None, description="Node count.") + time_created: Optional[datetime] = Field( + None, description="Creation time (RFC3339)." + ) + + +def map_db_system_summary(s) -> DbSystemSummary | None: + if s is None: + return None + data = _oci_to_dict(s) or {} + return DbSystemSummary( + id=getattr(s, "id", None) or data.get("id"), + display_name=getattr(s, "display_name", None) + or data.get("display_name") + or data.get("displayName"), + compartment_id=getattr(s, "compartment_id", None) + or data.get("compartment_id") + or data.get("compartmentId"), + lifecycle_state=getattr(s, "lifecycle_state", None) + or data.get("lifecycle_state") + or data.get("lifecycleState"), + shape=getattr(s, "shape", None) or data.get("shape"), + cpu_core_count=getattr(s, "cpu_core_count", None) + or data.get("cpu_core_count") + or data.get("cpuCoreCount"), + node_count=getattr(s, "node_count", None) + or data.get("node_count") + or data.get("nodeCount"), + time_created=getattr(s, "time_created", None) + or data.get("time_created") + or data.get("timeCreated"), + ) + + +class DbSystem(OCIBaseModel): + """ + Pydantic model mirroring oci.database.models.DbSystem. + """ + + id: Optional[str] = Field(None, description="DB System OCID.") + display_name: Optional[str] = Field(None, description="Display name.") + compartment_id: Optional[str] = Field(None, description="Compartment OCID.") + lifecycle_state: Optional[str] = Field(None, description="Lifecycle state.") + shape: Optional[str] = Field(None, description="Shape.") + cpu_core_count: Optional[int] = Field(None, description="CPU core count.") + node_count: Optional[int] = Field(None, description="Node count.") + license_model: Optional[str] = Field(None, description="License model.") + availability_domain: Optional[str] = Field(None, description="Availability domain.") + time_created: Optional[datetime] = Field( + None, description="Creation time (RFC3339)." 
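+        # DbSystem mirrors DbSystemSummary and adds license_model and
+        # availability_domain; both mappers tolerate missing fields.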
+ ) + + +def map_db_system(s) -> DbSystem | None: + if s is None: + return None + data = _oci_to_dict(s) or {} + return DbSystem( + id=getattr(s, "id", None) or data.get("id"), + display_name=getattr(s, "display_name", None) + or data.get("display_name") + or data.get("displayName"), + compartment_id=getattr(s, "compartment_id", None) + or data.get("compartment_id") + or data.get("compartmentId"), + lifecycle_state=getattr(s, "lifecycle_state", None) + or data.get("lifecycle_state") + or data.get("lifecycleState"), + shape=getattr(s, "shape", None) or data.get("shape"), + cpu_core_count=getattr(s, "cpu_core_count", None) + or data.get("cpu_core_count") + or data.get("cpuCoreCount"), + node_count=getattr(s, "node_count", None) + or data.get("node_count") + or data.get("nodeCount"), + license_model=getattr(s, "license_model", None) + or data.get("license_model") + or data.get("licenseModel"), + availability_domain=getattr(s, "availability_domain", None) + or data.get("availability_domain") + or data.get("availabilityDomain"), + time_created=getattr(s, "time_created", None) + or data.get("time_created") + or data.get("timeCreated"), + ) + + +# Database Protection Summary (constructed by server, no direct SDK mapping) + + +class ProtectedDatabaseBackupDestinationItem(OCIBaseModel): + database_id: str = Field(..., description="Database OCID.") + db_name: Optional[str] = Field(None, description="Database name.") + status: Optional[str] = Field( + None, description="CONFIGURED | HAS_BACKUPS | UNCONFIGURED" + ) + destination_types: List[str] = Field( + default_factory=list, + description="Backup destination type(s) (e.g., DBRS, OSS, NFS).", + ) + destination_ids: List[str] = Field( + default_factory=list, description="Backup destination OCIDs." + ) + last_backup_time: Optional[datetime] = Field( + None, description="Most recent backup time, if computed." + ) + + +class ProtectedDatabaseBackupDestinationSummary(OCIBaseModel): + compartment_id: Optional[str] = Field(None, description="Compartment OCID.") + region: Optional[str] = Field(None, description="Region.") + total_databases: int = Field(0, description="Total databases scanned.") + unconfigured_count: int = Field( + 0, description="Count of databases without configured automatic backups." + ) + counts_by_destination_type: Dict[str, int] = Field( + default_factory=dict, description="Counts by destination type." + ) + db_names_by_destination_type: Dict[str, List[str]] = Field( + default_factory=dict, description="DB names grouped by destination type." + ) + unconfigured_db_names: List[str] = Field( + default_factory=list, description="DBs not configured for auto backup." + ) + has_backups_db_names: List[str] = Field( + default_factory=list, description="DBs with backups but not configured." + ) + items: List[ProtectedDatabaseBackupDestinationItem] = Field( + default_factory=list, description="Per-database details." + ) + + +# endregion diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py index 3aae981a..2ca4d6d1 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py @@ -4,39 +4,147 @@ https://oss.oracle.com/licenses/upl. 
""" -import os import json -from logging import Logger -from typing import Annotated, Optional +import logging +import os +from logging.handlers import RotatingFileHandler +from typing import Annotated, Any, Optional import oci from fastmcp import FastMCP from oci.monitoring.models import SummarizeMetricsDataDetails +# Database Service models and mappers from oracle.oci_recovery_mcp_server.models import ( - ProtectedDatabaseSummary, - map_protected_database_summary, + Backup, + BackupSummary, + Database, + DatabaseHome, + DatabaseHomeSummary, + DatabaseSummary, + DbSystem, + DbSystemSummary, ProtectedDatabase, - map_protected_database, - ProtectionPolicySummary, - map_protection_policy_summary, + ProtectedDatabaseBackupDestinationItem, + ProtectedDatabaseBackupDestinationSummary, + ProtectedDatabaseBackupSpaceSum, + ProtectedDatabaseHealthCounts, + ProtectedDatabaseRedoCounts, + ProtectedDatabaseSummary, ProtectionPolicy, - map_protection_policy, - RecoveryServiceSubnetSummary, - map_recovery_service_subnet_summary, + ProtectionPolicySummary, RecoveryServiceSubnet, + RecoveryServiceSubnetSummary, + map_backup, + map_backup_summary, + map_database, + map_database_home, + map_database_home_summary, + map_database_summary, + map_db_backup_config, + map_db_system, + map_db_system_summary, + map_protected_database, + map_protected_database_summary, + map_protection_policy, + map_protection_policy_summary, map_recovery_service_subnet, - ProtectedDatabaseHealthCounts, - ProtectedDatabaseRedoCounts, - ProtectedDatabaseBackupSpaceSum, + map_recovery_service_subnet_summary, ) + from . import __project__, __version__ -logger = Logger(__name__, level="INFO") +"""MCP tools available in this server: +- get_compartment_by_name_tool +- list_protected_databases +- get_protected_database +- summarize_protected_database_health +- summarize_protected_database_redo_status +- summarize_backup_space_used +- list_protection_policies +- get_protection_policy +- list_recovery_service_subnets +- get_recovery_service_subnet +- get_recovery_service_metrics +- list_databases +- get_database +- list_backups +- get_backup +- summarise_protected_database_backup_destination +- get_db_home +- list_db_systems +- get_db_system +""" + + +# Logging setup +def setup_logging(): + level_name = os.getenv("ORACLE_MCP_LOG_LEVEL", "INFO").upper() + level = getattr(logging, level_name, logging.INFO) + + base_dir = os.path.abspath( + os.path.join(os.path.dirname(__file__), "..", "..", "..") + ) + log_dir = os.getenv("ORACLE_MCP_LOG_DIR", os.path.join(base_dir, "logs")) + os.makedirs(log_dir, exist_ok=True) + log_file = os.getenv( + "ORACLE_MCP_LOG_FILE", os.path.join(log_dir, "oci_recovery_mcp_server.log") + ) + + root_logger = logging.getLogger() + root_logger.setLevel(level) + + formatter = logging.Formatter( + fmt="%(asctime)s %(levelname)s [%(name)s] %(message)s", + datefmt="%Y-%m-%d %H:%M:%S%z", + ) + + # Add a rotating file handler if not already present for this file + abs_log_file = os.path.abspath(log_file) + has_file = any( + isinstance(h, RotatingFileHandler) + and getattr(h, "baseFilename", "") == abs_log_file + for h in root_logger.handlers + ) + if not has_file: + fh = RotatingFileHandler( + abs_log_file, maxBytes=10 * 1024 * 1024, backupCount=5, encoding="utf-8" + ) + fh.setLevel(level) + fh.setFormatter(formatter) + root_logger.addHandler(fh) + + # Optional console handler (default on; set ORACLE_MCP_LOG_TO_STDOUT=0 to disable) + if os.getenv("ORACLE_MCP_LOG_TO_STDOUT", "1").lower() in ( + "1", + "true", + "yes", + "y", 
+ ): + has_stream = any( + isinstance(h, logging.StreamHandler) + and not isinstance(h, RotatingFileHandler) + for h in root_logger.handlers + ) + if not has_stream: + sh = logging.StreamHandler() + sh.setLevel(level) + sh.setFormatter(formatter) + root_logger.addHandler(sh) + + # Quiet noisy libraries by default; override with ORACLE_SDK_LOG_LEVEL + logging.getLogger("oci").setLevel(os.getenv("ORACLE_SDK_LOG_LEVEL", "WARNING")) + logging.getLogger("urllib3").setLevel("WARNING") + + +setup_logging() +logger = logging.getLogger(__name__) mcp = FastMCP(name=__project__) -def get_recovery_client(region: str | None = None) -> oci.recovery.DatabaseRecoveryClient: +def get_recovery_client( + region: str | None = None, +) -> oci.recovery.DatabaseRecoveryClient: """ Initialize DatabaseRecoveryClient using the OCI config and a SecurityTokenSigner. Adds a custom user agent derived from the package name and version. @@ -61,6 +169,7 @@ def get_recovery_client(region: str | None = None) -> oci.recovery.DatabaseRecov regional_config["region"] = region return oci.recovery.DatabaseRecoveryClient(regional_config, signer=signer) + def get_identity_client(): config = oci.config.from_file( profile_name=os.getenv("OCI_CONFIG_PROFILE", oci.config.DEFAULT_PROFILE) @@ -74,6 +183,7 @@ def get_identity_client(): signer = oci.auth.signers.SecurityTokenSigner(token, private_key) return oci.identity.IdentityClient(config, signer=signer) + def get_database_client(region: str = None): config = oci.config.from_file( profile_name=os.getenv("OCI_CONFIG_PROFILE", oci.config.DEFAULT_PROFILE) @@ -91,7 +201,8 @@ def get_database_client(region: str = None): regional_config["region"] = region return oci.database.DatabaseClient(regional_config, signer=signer) -def get_monitoring_client(): + +def get_monitoring_client(region: str | None = None): logger.info("entering get_monitoring_client") config = oci.config.from_file( profile_name=os.getenv("OCI_CONFIG_PROFILE", oci.config.DEFAULT_PROFILE) @@ -101,11 +212,15 @@ def get_monitoring_client(): private_key = oci.signer.load_private_key_from_file(config["key_file"]) token_file = config["security_token_file"] - token = None with open(token_file, "r") as f: token = f.read() signer = oci.auth.signers.SecurityTokenSigner(token, private_key) - return oci.monitoring.MonitoringClient(config, signer=signer) + if region is None: + return oci.monitoring.MonitoringClient(config, signer=signer) + regional_config = config.copy() + regional_config["region"] = region + return oci.monitoring.MonitoringClient(regional_config, signer=signer) + def get_tenancy(): config = oci.config.from_file( @@ -140,10 +255,49 @@ def list_all_compartments_internal(only_one_page: bool, limit=100): limit=limit, ) compartments.extend(response.data) - return compartments +def _fetch_db_home_ids_for_compartment( + compartment_id: str, region: Optional[str] = None +) -> list[str]: + """ + Helper: enumerate DB Home OCIDs in a compartment. + Used when a tool needs a db_home_id but the caller omitted it. + Returns a list of DB Home OCIDs (may be empty). 
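+
+    Illustrative call (the OCIDs are placeholders):
+
+        home_ids = _fetch_db_home_ids_for_compartment(
+            "ocid1.compartment.oc1..example", region="us-ashburn-1"
+        )
+
+    Note: only the first page returned by list_db_homes is read here, so a
+    compartment with many DB Homes may yield a truncated list.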
+ """ + try: + client = get_database_client(region) + resp = client.list_db_homes(compartment_id=compartment_id) + data = resp.data + raw_list = getattr(data, "items", data) + raw_list = ( + raw_list + if isinstance(raw_list, list) + else [raw_list] if raw_list is not None else [] + ) + ids: list[str] = [] + for h in raw_list: + hid = getattr(h, "id", None) + if not hid: + try: + d = ( + getattr(oci.util, "to_dict")(h) + if hasattr(oci, "util") and hasattr(oci.util, "to_dict") + else None + ) + if isinstance(d, dict): + hid = d.get("id") + except Exception: + pass + if hid: + ids.append(hid) + return ids + except Exception: + # Conservative: on error, return empty so callers can react (e.g., empty results) + return [] + + def get_compartment_by_name(compartment_name: str): """Internal function to get compartment by name with caching""" compartments = list_all_compartments_internal(False) @@ -154,6 +308,7 @@ def get_compartment_by_name(compartment_name: str): return None + @mcp.tool() def get_compartment_by_name_tool(name: str) -> str: """Return a compartment matching the provided name""" @@ -163,6 +318,7 @@ def get_compartment_by_name_tool(name: str) -> str: else: return json.dumps({"error": f"Compartment '{name}' not found."}) + @mcp.tool( description="List Protected Databases in a given compartment with optional filters." ) @@ -170,7 +326,10 @@ def list_protected_databases( compartment_id: Annotated[str, "The OCID of the compartment"], lifecycle_state: Annotated[ Optional[str], - 'Filter by lifecycle state (e.g., "CREATING", "UPDATING", "ACTIVE", "DELETE_SCHEDULED", "DELETING", "DELETED", "FAILED")', + ( + 'Filter by lifecycle state (e.g., "CREATING", "UPDATING", ' + '"ACTIVE", "DELETE_SCHEDULED", "DELETING", "DELETED", "FAILED")' + ), ] = None, display_name: Annotated[Optional[str], "Exact match on display name"] = None, id: Annotated[Optional[str], "Protected Database OCID"] = None, @@ -185,9 +344,7 @@ def list_protected_databases( Optional[str], "Pagination token (opc-next-page) to continue listing from", ] = None, - sort_order: Annotated[ - Optional[str], 'Sort order: "ASC" or "DESC"' - ] = None, + sort_order: Annotated[Optional[str], 'Sort order: "ASC" or "DESC"'] = None, sort_by: Annotated[ Optional[str], 'Sort by field: "timeCreated" or "displayName"' ] = None, @@ -255,7 +412,9 @@ def list_protected_databases( @mcp.tool(description="Get a Protected Database by OCID.") def get_protected_database( protected_database_id: Annotated[str, "Protected Database OCID"], - opc_request_id: Annotated[Optional[str], "Unique identifier for the request"] = None, + opc_request_id: Annotated[ + Optional[str], "Unique identifier for the request" + ] = None, region: Annotated[ Optional[str], "OCI region to execute the request in (e.g., us-ashburn-1)" ] = None, @@ -285,7 +444,14 @@ def get_protected_database( raise -@mcp.tool(description="Summarizes Protected Database health status counts (PROTECTED, WARNING, ALERT) in a compartment. Lists protected databases then fetches each to read its health field; returns counts.") +@mcp.tool( + description=( + "Summarizes Protected Database health status counts (PROTECTED, WARNING, ALERT, UNKNOWN) " + "in a compartment. " + "Lists protected databases then fetches each to read its health field; returns counts including " + "UNKNOWN for missing/None health." 
+    )
+)
 def summarize_protected_database_health(
     compartment_id: Annotated[
         Optional[str],
@@ -294,10 +460,12 @@
     region: Annotated[
         Optional[str], "OCI region to execute the request in (e.g., us-ashburn-1)"
     ] = None,
 ) -> ProtectedDatabaseHealthCounts:
     """
-    Summarizes Protected Database health status counts (PROTECTED, WARNING, ALERT) in a compartment.
-    The tool lists protected databases then fetches each to read its health field; returns counts.
+    Summarizes Protected Database health status counts (PROTECTED, WARNING, ALERT, UNKNOWN) in a compartment.
+    The tool lists protected databases, reads health from the summary when available, falls back to a GET per PD,
+    and returns counts. Total equals PDs scanned. UNKNOWN counts PDs with missing/None health (often DELETED
+    or transitional).
     """
     try:
         client = get_recovery_client(region)
@@ -306,6 +474,8 @@
         protected = 0
         warning = 0
         alert = 0
+        unknown = 0
+        scanned = 0

         has_next_page = True
         next_page: Optional[str] = None
@@ -314,15 +484,29 @@
             list_kwargs = {
                 "compartment_id": comp_id,
                 "page": next_page,
+                "lifecycle_state": "ACTIVE",
             }
-            response: oci.response.Response = client.list_protected_databases(**list_kwargs)
+            response: oci.response.Response = client.list_protected_databases(
+                **list_kwargs
+            )
             has_next_page = response.has_next_page
             next_page = response.next_page if hasattr(response, "next_page") else None

             data = response.data
             items = getattr(data, "items", data)
             for item in items or []:
-                pd_id = getattr(item, "id", None) or (getattr(item, "data", None) and getattr(item.data, "id", None))
+                # Read health from summary first
+                health = getattr(item, "health", None)
+                if not health and hasattr(item, "__dict__"):
+                    try:
+                        health = item.__dict__.get("health")
+                    except Exception:
+                        health = None
+
+                pd_id = getattr(item, "id", None) or (
+                    getattr(item, "data", None) and getattr(item.data, "id", None)
+                )
+                logger.debug(f"Item structure: {item}, Extracted id: {pd_id}")
                 if pd_id is None:
                     try:
                         item_dict = getattr(item, "__dict__", None) or {}
@@ -330,13 +514,23 @@
                     except Exception:
                         pd_id = None
                 if not pd_id:
+                    # Can't fetch details; skip counting this entry
                     continue

-                pd_resp: oci.response.Response = client.get_protected_database(protected_database_id=pd_id)
-                pd = pd_resp.data
-                health = getattr(pd, "health", None)
-                if not health and hasattr(pd, "__dict__"):
-                    health = pd.__dict__.get("health")
+                scanned += 1
+
+                # If summary lacked health, fetch full PD
+                if not health:
+                    try:
+                        pd_resp: oci.response.Response = client.get_protected_database(
+                            protected_database_id=pd_id
+                        )
+                        pd = pd_resp.data
+                        health = getattr(pd, "health", None)
+                        if not health and hasattr(pd, "__dict__"):
+                            health = pd.__dict__.get("health")
+                    except Exception:
+                        health = None

                 if health == "PROTECTED":
                     protected += 1
@@ -345,13 +539,20 @@
                 elif health == "ALERT":
                     alert += 1
                 else:
-                    # unknown/None health -> not counted in the three buckets
-                    pass
+                    # unknown/None health
+                    unknown += 1

-        total = protected + warning + alert
+        total = scanned
         logger.info(
-            f"Health summary for compartment {comp_id} (region={region}): "
-            f"PROTECTED={protected}, WARNING={warning}, ALERT={alert}, TOTAL={total}"
+            "Health summary for compartment %s (region=%s): "
+            "PROTECTED=%s, WARNING=%s, ALERT=%s, UNKNOWN=%s, TOTAL=%s",
+            comp_id,
+            region,
+            protected,
+            warning,
+            alert,
+            unknown,
+            total,
         )
         return ProtectedDatabaseHealthCounts(
             compartment_id=comp_id,
@@ -359,6 +560,7 @@
             protected=protected,
             warning=warning,
             alert=alert,
+            unknown=unknown,
             total=total,
         )
     except Exception as e:
@@ -366,7 +568,13 @@


-@mcp.tool(description="Summarizes redo transport enablement for Protected Databases in a compartment. Lists protected databases then fetches each to inspect is_redo_logs_enabled (true=enabled, false=disabled).")
+@mcp.tool(
+    description=(
+        "Summarizes redo transport enablement for Protected Databases in a compartment. "
+        "Lists protected databases then fetches each to inspect "
+        "is_redo_logs_shipped (true=enabled, false=disabled)."
+    )
+)
 def summarize_protected_database_redo_status(
     compartment_id: Annotated[
         Optional[str],
@@ -375,10 +583,11 @@
     region: Annotated[
         Optional[str], "OCI region to execute the request in (e.g., us-ashburn-1)"
     ] = None,
 ) -> ProtectedDatabaseRedoCounts:
     """
     Summarizes redo transport enablement for Protected Databases in a compartment.
-    Lists protected databases then fetches each to inspect is_redo_logs_enabled (true=enabled, false=disabled).
+    Lists protected databases then fetches each to inspect
+    is_redo_logs_shipped (true=enabled, false=disabled).
     """
     try:
         client = get_recovery_client(region)
@@ -394,8 +603,11 @@
             list_kwargs = {
                 "compartment_id": comp_id,
                 "page": next_page,
+                "lifecycle_state": "ACTIVE",
             }
-            response: oci.response.Response = client.list_protected_databases(**list_kwargs)
+            response: oci.response.Response = client.list_protected_databases(
+                **list_kwargs
+            )
             has_next_page = response.has_next_page
             next_page = response.next_page if hasattr(response, "next_page") else None

@@ -403,7 +615,9 @@
             items = getattr(data, "items", data)
             for item in items or []:
                 # Robustly get the PD OCID from summary item
-                pd_id = getattr(item, "id", None) or (getattr(item, "data", None) and getattr(item.data, "id", None))
+                pd_id = getattr(item, "id", None) or (
+                    getattr(item, "data", None) and getattr(item.data, "id", None)
+                )
                 if pd_id is None:
                     try:
                         item_dict = getattr(item, "__dict__", None) or {}
@@ -413,12 +627,29 @@
                 if not pd_id:
                     continue

-                # Fetch full Protected Database to read is_redo_logs_enabled
-                pd_resp: oci.response.Response = client.get_protected_database(protected_database_id=pd_id)
+                # Fetch full Protected Database to read is_redo_logs_shipped (primary)
+                pd_resp: oci.response.Response = client.get_protected_database(
+                    protected_database_id=pd_id
+                )
                 pd = pd_resp.data
-                redo_enabled = getattr(pd, "is_redo_logs_enabled", None)
+                redo_enabled = getattr(pd, "is_redo_logs_shipped", None)
                 if redo_enabled is None and hasattr(pd, "__dict__"):
-                    redo_enabled = pd.__dict__.get("is_redo_logs_enabled") or pd.__dict__.get("isRedoLogsEnabled")
+                    redo_enabled = pd.__dict__.get(
+                        "is_redo_logs_shipped"
+                    ) or pd.__dict__.get("isRedoLogsShipped")
+                # Fallback: some SDK/reporting expose Real-time protection
+                # under metrics as is_redo_logs_enabled
+                if redo_enabled is None:
+                    try:
+                        m = getattr(pd, "metrics", None)
+                        if m is not None:
+                            redo_enabled = getattr(m, "is_redo_logs_enabled", None)
+                            if redo_enabled is None and hasattr(m, "__dict__"):
+                                redo_enabled = m.__dict__.get(
+                                    "is_redo_logs_enabled"
+                                ) or m.__dict__.get("isRedoLogsEnabled")
+                    except Exception:
+                        pass

                 if redo_enabled is True:
                     enabled += 1
@@ -430,8 +661,13 @@
         total = enabled + disabled
         logger.info(
-            f"Redo transport summary for compartment {comp_id} (region={region}): "
-            f"ENABLED={enabled}, DISABLED={disabled}, TOTAL={total}"
+            "Redo transport summary for compartment %s (region=%s): "
+            "ENABLED=%s, DISABLED=%s, TOTAL=%s",
+            comp_id,
+            region,
+            enabled,
+            disabled,
+            total,
         )
         return ProtectedDatabaseRedoCounts(
             compartment_id=comp_id,
@@ -441,11 +677,19 @@
             total=total,
         )
     except Exception as e:
-        logger.error(f"Error in summarize_protected_database_redo_status tool: {str(e)}")
+        logger.error(
+            f"Error in summarize_protected_database_redo_status tool: {str(e)}"
+        )
         raise


-@mcp.tool(description="Sums backup space used (GB) by Protected Databases in a compartment. Lists protected databases in the compartment, fetches each, reads metrics.backup_space_used_in_gbs (or variants), and returns the total.")
+@mcp.tool(
+    description=(
+        "Sums backup space used (GB) by Protected Databases in a compartment by "
+        "reading backup_space_used_in_gbs from metrics. "
+        "Returns compartmentId, region, totalDatabasesScanned, sumBackupSpaceUsedInGBs."
+    )
+)
 def summarize_backup_space_used(
     compartment_id: Annotated[
         Optional[str],
@@ -455,19 +699,19 @@
         Optional[str],
         "Canonical OCI region (e.g., us-ashburn-1) to execute the request in.",
     ] = None,
 ) -> ProtectedDatabaseBackupSpaceSum:
     """
     Sums backup space used (GB) by Protected Databases in a compartment.
-    Lists protected databases in the compartment, fetches each, reads metrics.backup_space_used_in_gbs
-    (or variants), and returns the total as a ProtectedDatabaseBackupSpaceSum model.
+    For each PD: scans, increments total, and reads backup_space_used_in_gbs from metrics.
+    Important: metrics are not reliably exposed on list summaries; fetch the full PD to read metrics.
+    Returns: compartmentId, region, totalDatabasesScanned, sumBackupSpaceUsedInGBs.
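+
+    Sketch of the per-PD read this tool performs (attribute names follow the
+    code below; the metrics object may also arrive as a plain dict):
+
+        pd = client.get_protected_database(protected_database_id=pd_id).data
+        gb = getattr(getattr(pd, "metrics", None), "backup_space_used_in_gbs", None)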
""" try: client = get_recovery_client(region) comp_id = compartment_id or get_tenancy() - - total_scanned: int = 0 - sum_gb: float = 0.0 - + sum_gb = 0.0 + scanned = 0 + missing_metrics = 0 has_next_page = True next_page: Optional[str] = None @@ -476,7 +720,9 @@ def summarize_backup_space_used( "compartment_id": comp_id, "page": next_page, } - response: oci.response.Response = client.list_protected_databases(**list_kwargs) + response: oci.response.Response = client.list_protected_databases( + **list_kwargs + ) has_next_page = response.has_next_page next_page = response.next_page if hasattr(response, "next_page") else None @@ -484,8 +730,11 @@ def summarize_backup_space_used( items = getattr(data, "items", data) for item in items or []: - # Extract PD OCID from the summary item robustly - pd_id = getattr(item, "id", None) or (getattr(item, "data", None) and getattr(item.data, "id", None)) + # Robustly get the PD OCID from summary item (same as redo status tool) + pd_id = getattr(item, "id", None) or ( + getattr(item, "data", None) and getattr(item.data, "id", None) + ) + logger.info(f"Item structure: {item}, Extracted id: {pd_id}") if pd_id is None: try: item_dict = getattr(item, "__dict__", None) or {} @@ -495,55 +744,82 @@ def summarize_backup_space_used( if not pd_id: continue - total_scanned += 1 - - # Fetch full PD to read metrics - pd_resp: oci.response.Response = client.get_protected_database(protected_database_id=pd_id) - pd = pd_resp.data - - # Try PD.metrics.backup_space_used_in_gbs (and variants) - metrics = getattr(pd, "metrics", None) - val = None - if metrics is not None: - val = getattr(metrics, "backup_space_used_in_gbs", None) - if val is None and hasattr(metrics, "__dict__"): - val = metrics.__dict__.get("backup_space_used_in_gbs") or metrics.__dict__.get("backupSpaceUsedInGbs") + scanned += 1 - # Fallback to summary metrics on list item if full PD had none - if val is None: + # Always fetch the full Protected Database to read metrics reliably + gb_val = None + try: + pd_resp: oci.response.Response = client.get_protected_database( + protected_database_id=pd_id + ) + pd_obj = pd_resp.data + metrics = getattr(pd_obj, "metrics", None) + if metrics is None and hasattr(pd_obj, "__dict__"): + metrics = getattr(pd_obj, "__dict__", {}).get("metrics") + # metrics may be a model or a dict; normalise access + if metrics is not None: + if hasattr(metrics, "backup_space_used_in_gbs"): + gb_val = getattr(metrics, "backup_space_used_in_gbs", None) + if gb_val is None and hasattr(metrics, "__dict__"): + gb_val = metrics.__dict__.get( + "backup_space_used_in_gbs" + ) or metrics.__dict__.get("backupSpaceUsedInGbs") + if gb_val is None and isinstance(metrics, dict): + gb_val = metrics.get( + "backup_space_used_in_gbs" + ) or metrics.get("backupSpaceUsedInGbs") + except Exception: + # If GET fails, fall back to any summary metrics representation try: - item_metrics = getattr(item, "metrics", None) - if item_metrics is not None: - val = getattr(item_metrics, "backup_space_used_in_gbs", None) - if val is None and hasattr(item_metrics, "__dict__"): - val = item_metrics.__dict__.get("backup_space_used_in_gbs") or item_metrics.__dict__.get("backupSpaceUsedInGbs") + m = getattr(item, "metrics", None) + if m is not None: + gb_val = getattr(m, "backup_space_used_in_gbs", None) + if gb_val is None and hasattr(m, "__dict__"): + gb_val = m.__dict__.get( + "backup_space_used_in_gbs" + ) or m.__dict__.get("backupSpaceUsedInGbs") + if gb_val is None and isinstance(m, dict): + gb_val = 
m.get("backup_space_used_in_gbs") or m.get( + "backupSpaceUsedInGbs" + ) except Exception: - pass + gb_val = None + + if gb_val is None: + missing_metrics += 1 - # Accumulate if numeric try: - if val is not None: - sum_gb += float(val) + gb = float(gb_val) if gb_val is not None else 0.0 except Exception: - # Ignore non-numeric values gracefully - pass + gb = 0.0 + + sum_gb += gb logger.info( - f"Backup space used summary for compartment {comp_id} (region={region}): " - f"scanned={total_scanned}, sum_gb={sum_gb}" + "Backup space used summary for compartment %s (region=%s): " + "scanned=%s, total_gb=%s, missing_metrics=%s", + comp_id, + region, + scanned, + sum_gb, + missing_metrics, ) return ProtectedDatabaseBackupSpaceSum( - compartment_id=comp_id, + compartmentId=comp_id, region=region, - total_databases_scanned=total_scanned, - sum_backup_space_used_in_gbs=sum_gb, + totalDatabasesScanned=scanned, + sumBackupSpaceUsedInGBs=round(sum_gb, 2), ) + # logger.info(f"Returning dict result: {result}") + # return result except Exception as e: logger.error(f"Error in summarize_backup_space_used tool: {str(e)}") raise -@mcp.tool(description="List Protection Policies in a given compartment with optional filters.") +@mcp.tool( + description="List Protection Policies in a given compartment with optional filters." +) def list_protection_policies( compartment_id: Annotated[str, "The OCID of the compartment"], lifecycle_state: Annotated[ @@ -557,9 +833,7 @@ def list_protection_policies( Optional[str], "Pagination token (opc-next-page) to continue listing from", ] = None, - sort_order: Annotated[ - Optional[str], 'Sort order: "ASC" or "DESC"' - ] = None, + sort_order: Annotated[Optional[str], 'Sort order: "ASC" or "DESC"'] = None, sort_by: Annotated[ Optional[str], 'Sort by field: "timeCreated" or "displayName"' ] = None, @@ -623,7 +897,9 @@ def list_protection_policies( @mcp.tool(description="Get a Protection Policy by OCID.") def get_protection_policy( protection_policy_id: Annotated[str, "Protection Policy OCID"], - opc_request_id: Annotated[Optional[str], "Unique identifier for the request"] = None, + opc_request_id: Annotated[ + Optional[str], "Unique identifier for the request" + ] = None, region: Annotated[ Optional[str], "OCI region to execute the request in (e.g., us-ashburn-1)" ] = None, @@ -653,12 +929,17 @@ def get_protection_policy( raise -@mcp.tool(description="List Recovery Service Subnets in a given compartment with optional filters.") +@mcp.tool( + description="List Recovery Service Subnets in a given compartment with optional filters." 
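+    # Paginates internally until exhaustion; "page" only seeds the first
+    # request and "limit" caps each page (same pattern as the other listers).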
+)
 def list_recovery_service_subnets(
     compartment_id: Annotated[str, "The OCID of the compartment"],
     lifecycle_state: Annotated[
         Optional[str],
-        'Filter by lifecycle state (e.g., "CREATING", "ACTIVE", "UPDATING", "DELETING", "DELETED", "FAILED")',
+        (
+            'Filter by lifecycle state (e.g., "CREATING", "ACTIVE", '
+            '"UPDATING", "DELETING", "DELETED", "FAILED")'
+        ),
     ] = None,
     display_name: Annotated[Optional[str], "Exact match on display name"] = None,
     id: Annotated[Optional[str], "Recovery Service Subnet OCID"] = None,
@@ -668,9 +949,7 @@
         Optional[str],
         "Pagination token (opc-next-page) to continue listing from",
     ] = None,
-    sort_order: Annotated[
-        Optional[str], 'Sort order: "ASC" or "DESC"'
-    ] = None,
+    sort_order: Annotated[Optional[str], 'Sort order: "ASC" or "DESC"'] = None,
     sort_by: Annotated[
         Optional[str], 'Sort by field: "timeCreated" or "displayName"'
     ] = None,
@@ -714,7 +993,9 @@
         if opc_request_id is not None:
             kwargs["opc_request_id"] = opc_request_id

-        response: oci.response.Response = client.list_recovery_service_subnets(**kwargs)
+        response: oci.response.Response = client.list_recovery_service_subnets(
+            **kwargs
+        )
         has_next_page = response.has_next_page
         next_page = response.next_page if hasattr(response, "next_page") else None

@@ -736,7 +1017,9 @@
 @mcp.tool(description="Get a Recovery Service Subnet by OCID.")
 def get_recovery_service_subnet(
     recovery_service_subnet_id: Annotated[str, "Recovery Service Subnet OCID"],
-    opc_request_id: Annotated[Optional[str], "Unique identifier for the request"] = None,
+    opc_request_id: Annotated[
+        Optional[str], "Unique identifier for the request"
+    ] = None,
     region: Annotated[
         Optional[str], "OCI region to execute the request in (e.g., us-ashburn-1)"
     ] = None,
@@ -779,21 +1062,24 @@
     ] = "SpaceUsedForRecoveryWindow",
     resolution: Annotated[
         str,
-        "The granularity of the metric. Currently we only support: 1m, 5m, 1h, 1d. Default: 1m.",
-    ] = "1m",
+        "The granularity of the metric. Currently we only support: 1m, 5m, 1h, 1d. Default: 1h.",
+    ] = "1h",
     aggregation: Annotated[
         str,
         "The aggregation for the metric. Currently we only support: "
-        "mean, sum, max, min, count. Default: mean",
-    ] = "mean",
+        "mean, sum, max, min, count. Default: max",
+    ] = "max",
     protected_database_id: Annotated[
-        str,
-        "Optional protected database OCID to filter by " "(maps to resourceId dimension)",
+        Optional[str],
+        "Optional protected database OCID to filter by "
+        "(maps to resourceId dimension)",
     ] = None,
 ) -> list[dict]:
     monitoring_client = get_monitoring_client()
     namespace = "oci_recovery_service"
-    filter_clause = f'{{resourceId="{protected_database_id}"}}' if protected_database_id else ""
+    filter_clause = (
+        f'{{resourceId="{protected_database_id}"}}' if protected_database_id else ""
+    )
     query = f"{metricName}[{resolution}]{filter_clause}.{aggregation}()"

     series_list = monitoring_client.summarize_metrics_data(
@@ -827,13 +1113,812 @@
     return result


+# ---------------- Database Service Tools ----------------
+
+
+@mcp.tool(
+    description=(
+        "Gets a list of the databases in the specified Database Home. "
+        "If db_home_id is omitted, the tool will automatically look up all DB Homes in the given compartment "
+        "and aggregate results per DB Home."
+    )
+)
+def list_databases(
+    compartment_id: Annotated[
+        Optional[str], "The compartment OCID. Required if db_home_id is not provided."
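+        # If db_home_id is also omitted, the body raises ValueError rather
+        # than guessing a scope.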
+ ] = None, + db_home_id: Annotated[ + Optional[str], + "A Database Home OCID. If omitted, all DB Homes in the compartment will be used.", + ] = None, + system_id: Annotated[ + Optional[str], "The OCID of the Exadata DB system to filter by (Exadata only)." + ] = None, + limit: Annotated[ + Optional[int], "The maximum number of items to return per page." + ] = None, + page: Annotated[ + Optional[str], "The pagination token to continue listing from." + ] = None, + sort_by: Annotated[Optional[str], 'Sort by field: "DBNAME" | "TIMECREATED"'] = None, + sort_order: Annotated[Optional[str], '"ASC" or "DESC"'] = None, + lifecycle_state: Annotated[Optional[str], "Exact lifecycle state filter."] = None, + db_name: Annotated[ + Optional[str], "Exact database name filter (case-insensitive)." + ] = None, + region: Annotated[ + Optional[str], "Region to execute the request, e.g., us-ashburn-1." + ] = None, +) -> list[DatabaseSummary]: + try: + client = get_database_client(region) + + # Determine DB Home scope + if db_home_id is None: + if not compartment_id: + raise ValueError( + "Either db_home_id must be provided or compartment_id " + "must be set to derive DB Homes." + ) + home_ids = _fetch_db_home_ids_for_compartment(compartment_id, region=region) + else: + home_ids = [db_home_id] + + if not home_ids: + return [] + + pd_policy_by_dbid: dict[str, str] = {} + if compartment_id: + try: + rec_client = get_recovery_client(region) + has_next = True + next_page = None + while has_next: + lp = rec_client.list_protected_databases( + compartment_id=compartment_id, page=next_page + ) + has_next = lp.has_next_page + next_page = getattr(lp, "next_page", None) + pdata = lp.data + pitems = getattr(pdata, "items", pdata) + for it in pitems or []: + try: + if hasattr(oci, "util") and hasattr(oci.util, "to_dict"): + d = oci.util.to_dict(it) + else: + d = getattr(it, "__dict__", {}) or {} + except Exception: + d = getattr(it, "__dict__", {}) or {} + dbid = d.get("databaseId") or d.get("database_id") + ppid = d.get("protectionPolicyId") or d.get( + "protection_policy_id" + ) + if dbid and ppid and dbid not in pd_policy_by_dbid: + pd_policy_by_dbid[dbid] = ppid + except Exception: + pd_policy_by_dbid = {} + + results: list[DatabaseSummary] = [] + common_kwargs: dict = {} + if compartment_id is not None: + common_kwargs["compartment_id"] = compartment_id + if system_id is not None: + common_kwargs["system_id"] = system_id + if limit is not None: + common_kwargs["limit"] = limit + if page is not None: + common_kwargs["page"] = page + if sort_by is not None: + common_kwargs["sort_by"] = sort_by + if sort_order is not None: + common_kwargs["sort_order"] = sort_order + if lifecycle_state is not None: + common_kwargs["lifecycle_state"] = lifecycle_state + if db_name is not None: + common_kwargs["db_name"] = db_name + + for hid in home_ids: + kwargs = dict(common_kwargs) + kwargs["db_home_id"] = hid + response: oci.response.Response = client.list_databases(**kwargs) + raw = getattr(response.data, "items", response.data) + for item in raw or []: + mapped = map_database_summary(item) + if mapped is not None: + # Enrich db_backup_config for summaries by fetching full Database only if missing + try: + if getattr(mapped, "db_backup_config", None) is None: + db_id = getattr(item, "id", None) or ( + getattr(item, "data", None) + and getattr(item.data, "id", None) + ) + if not db_id and hasattr(item, "__dict__"): + db_id = item.__dict__.get("id") + if db_id: + gd = client.get_database(database_id=db_id).data + # Try to locate backup 
config from object or dict forms + cfg_src = getattr( + gd, "db_backup_config", None + ) or getattr(gd, "database_backup_config", None) + if cfg_src is None: + try: + d = ( + oci.util.to_dict(gd) + if hasattr(oci, "util") + and hasattr(oci.util, "to_dict") + else (getattr(gd, "__dict__", {}) or {}) + ) + except Exception: + d = getattr(gd, "__dict__", {}) or {} + cfg_src = ( + d.get("dbBackupConfig") + or d.get("db_backup_config") + or d.get("databaseBackupConfig") + or d.get("database_backup_config") + ) + mapped.db_backup_config = map_db_backup_config(cfg_src) + except Exception: + # Best-effort enrichment; ignore failures and still return the summary + pass + try: + if compartment_id is not None: + mapped.protection_policy_id = pd_policy_by_dbid.get( + mapped.id + ) + except Exception: + pass + results.append(mapped) + + return results + except Exception as e: + logger.error(f"Error in list_databases tool: {e}") + raise + + +@mcp.tool(description="Retrieves full details for a Database by OCID.") +def get_database( + database_id: Annotated[str, "OCID of the Database to retrieve."], + region: Annotated[ + Optional[str], "Canonical OCI region (e.g., us-ashburn-1)." + ] = None, +) -> Database: + try: + client = get_database_client(region) + resp = client.get_database(database_id=database_id) + mapped = map_database(resp.data) + # Enrich protection_policy_id by correlating with Recovery Service + # Protected Databases in the same compartment + try: + comp_id = getattr(resp.data, "compartment_id", None) + if comp_id is None: + try: + d = ( + oci.util.to_dict(resp.data) + if hasattr(oci, "util") and hasattr(oci.util, "to_dict") + else (getattr(resp.data, "__dict__", {}) or {}) + ) + except Exception: + d = getattr(resp.data, "__dict__", {}) or {} + comp_id = d.get("compartmentId") or d.get("compartment_id") + if comp_id: + rec_client = get_recovery_client(region) + has_next = True + next_page = None + found_ppid = None + while has_next and not found_ppid: + lp = rec_client.list_protected_databases( + compartment_id=comp_id, page=next_page + ) + has_next = lp.has_next_page + next_page = getattr(lp, "next_page", None) + pdata = lp.data + pitems = getattr(pdata, "items", pdata) + for it in pitems or []: + try: + if hasattr(oci, "util") and hasattr(oci.util, "to_dict"): + d = oci.util.to_dict(it) + else: + d = getattr(it, "__dict__", {}) or {} + except Exception: + d = getattr(it, "__dict__", {}) or {} + if (d.get("databaseId") or d.get("database_id")) == database_id: + found_ppid = d.get("protectionPolicyId") or d.get( + "protection_policy_id" + ) + break + if mapped is not None: + mapped.protection_policy_id = found_ppid + except Exception: + pass + return mapped + except Exception as e: + logger.error(f"Error in get_database tool: {e}") + raise + + +@mcp.tool( + description=( + "Lists Database Backups with optional filters. " + "If neither database_id nor compartment_id is provided, defaults to tenancy compartment." + ) +) +def list_backups( + compartment_id: Annotated[ + Optional[str], "OCID of the compartment to scope the search." + ] = None, + database_id: Annotated[ + Optional[str], "OCID of the Database to filter backups for." + ] = None, + lifecycle_state: Annotated[Optional[str], "Filter by lifecycle state."] = None, + type: Annotated[ + Optional[str], "Backup type filter (e.g., INCREMENTAL, FULL)." 
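+        # "type" shadows the Python builtin; presumably kept to mirror the
+        # service's query parameter name.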
+ ] = None, + limit: Annotated[Optional[int], "Maximum number of items per page."] = None, + page: Annotated[Optional[str], "Pagination token (opc-next-page)."] = None, + region: Annotated[ + Optional[str], "Canonical OCI region (e.g., us-ashburn-1)." + ] = None, +) -> list[BackupSummary]: + try: + client = get_database_client(region) + results: list[BackupSummary] = [] + has_next = True + next_page = page + if not compartment_id and not database_id: + compartment_id = get_tenancy() + while has_next: + kwargs: dict = {"page": next_page} + if database_id: + kwargs["database_id"] = database_id + if compartment_id: + kwargs["compartment_id"] = compartment_id + if lifecycle_state: + kwargs["lifecycle_state"] = lifecycle_state + if type: + kwargs["type"] = type + if limit is not None: + kwargs["limit"] = limit + resp = client.list_backups(**kwargs) + data = getattr(resp.data, "items", resp.data) + for it in data or []: + m = map_backup_summary(it) + if m is not None: + results.append(m) + has_next = resp.has_next_page + next_page = resp.next_page if hasattr(resp, "next_page") else None + return results + except Exception as e: + logger.error(f"Error in list_backups tool: {e}") + raise + + +@mcp.tool(description="Retrieves a Database Backup by OCID.") +def get_backup( + backup_id: Annotated[str, "OCID of the Backup to retrieve."], + region: Annotated[ + Optional[str], "Canonical OCI region (e.g., us-ashburn-1)." + ] = None, +) -> Backup: + try: + client = get_database_client(region) + resp = client.get_backup(backup_id=backup_id) + return map_backup(resp.data) + except Exception as e: + logger.error(f"Error in get_backup tool: {e}") + raise + + +@mcp.tool( + description=( + "Summarizes Database backup configuration and destinations " + "for databases in a compartment or DB Home. " + "Reports counts by destination type (e.g., DBRS, OBJECT_STORE, NFS), " + "number unconfigured, and per-DB details. " + "If db_home_id is omitted, the tool automatically discovers all DB Homes " + "in the compartment and aggregates per-home." + ) +) +def summarise_protected_database_backup_destination( + compartment_id: Annotated[ + Optional[str], + "OCID of the compartment. If omitted, defaults to the tenancy/DEFAULT profile.", + ] = None, + region: Annotated[ + Optional[str], "Canonical OCI region (e.g., us-ashburn-1)." + ] = None, + db_home_id: Annotated[ + Optional[str], + "Optional DB Home OCID to scope databases. If omitted, all DB Homes in the compartment are used.", + ] = None, + include_last_backup_time: Annotated[ + bool, "If true, compute last backup time per DB (extra API calls)." 
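+        # When False, the body probes with list_backups(..., limit=1) instead
+        # of fetching every backup, keeping the scan cheap.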
+ ] = False, +) -> ProtectedDatabaseBackupDestinationSummary: + try: + db_client = get_database_client(region) + rec_client = get_recovery_client(region) + if not compartment_id: + compartment_id = get_tenancy() + + # Discover DB Homes if not specified + home_ids: list[str] = ( + [db_home_id] + if db_home_id + else _fetch_db_home_ids_for_compartment(compartment_id, region=region) + ) + + # Collect database summaries + db_summaries: list[Any] = [] + if home_ids: + for hid in home_ids: + resp = db_client.list_databases( + compartment_id=compartment_id, + db_home_id=hid, + lifecycle_state="AVAILABLE", + ) + data = getattr(resp.data, "items", resp.data) + if isinstance(data, list): + db_summaries.extend(data) + elif data is not None: + db_summaries.append(data) + + # Build a map of database_id -> list of Protected Databases (from Recovery Service) + pd_by_dbid: dict[str, list[dict]] = {} + try: + has_next = True + next_page = None + while has_next: + lp = rec_client.list_protected_databases( + compartment_id=compartment_id, page=next_page + ) + has_next = lp.has_next_page + next_page = getattr(lp, "next_page", None) + pdata = lp.data + pitems = getattr(pdata, "items", pdata) + for it in pitems or []: + # convert to dict for easy field access + try: + if hasattr(oci, "util") and hasattr(oci.util, "to_dict"): + d = oci.util.to_dict(it) + else: + d = getattr(it, "__dict__", {}) or {} + except Exception: + d = getattr(it, "__dict__", {}) or {} + dbid = d.get("databaseId") or d.get("database_id") + if dbid: + pd_by_dbid.setdefault(dbid, []).append(d) + except Exception: + pd_by_dbid = {} + + def _to_dict(o: Any) -> dict: + try: + if hasattr(oci, "util") and hasattr(oci.util, "to_dict"): + d = oci.util.to_dict(o) + if isinstance(d, dict): + return d + except Exception: + pass + return getattr(o, "__dict__", {}) if hasattr(o, "__dict__") else {} + + def _get(o: Any, *names: str): + for n in names: + if hasattr(o, n): + v = getattr(o, n) + if v is not None: + return v + d = _to_dict(o) + for n in names: + if d.get(n) is not None: + return d.get(n) + return None + + def _extract_backup_destination_details(db_dict: dict) -> list[dict]: + cfg = None + for k in ( + "dbBackupConfig", + "db_backup_config", + "backupConfig", + "backup_config", + "databaseBackupConfig", + "database_backup_config", + ): + if isinstance(db_dict.get(k), dict): + cfg = db_dict.get(k) + break + if cfg is None: + cfg = db_dict if isinstance(db_dict, dict) else {} + details = ( + cfg.get("backupDestinationDetails") + or cfg.get("backup_destination_details") + or db_dict.get("backupDestinationDetails") + or db_dict.get("backup_destination_details") + ) + if not details: + return [] + return details if isinstance(details, list) else [details] + + def _normalize_dest_type(t: Optional[str]) -> str: + if not t: + return "UNKNOWN" + u = str(t).upper() + if u in ( + "RECOVERY_SERVICE", + "RECOVERY-SERVICE", + "DBRS", + "RECOVERY_SERVICE_BACKUP_DESTINATION", + ): + return "DBRS" + if u in ("OBJECT_STORE", "OBJECTSTORE", "OBJECT_STORAGE"): + return "OSS" + if u in ("NFS",): + return "NFS" + return u + + def _is_auto_backup_enabled(db_dict: dict) -> bool: + cfg = None + for k in ( + "dbBackupConfig", + "db_backup_config", + "backupConfig", + "backup_config", + "databaseBackupConfig", + "database_backup_config", + ): + v = db_dict.get(k) + if isinstance(v, dict): + cfg = v + break + if isinstance(cfg, dict): + for key in ( + "isAutoBackupEnabled", + "is_auto_backup_enabled", + "autoBackupEnabled", + "auto_backup_enabled", + ): + if key in cfg 
and cfg[key] is not None: + return bool(cfg[key]) + for key in ( + "isAutoBackupEnabled", + "is_auto_backup_enabled", + "autoBackupEnabled", + "auto_backup_enabled", + ): + if key in db_dict and db_dict[key] is not None: + return bool(db_dict[key]) + return False + + def _read_backup_times_from_obj(o: Any) -> list[Any]: + times = [] + for attr in ( + "time_ended", + "timeEnded", + "time_started", + "timeStarted", + "time_created", + "timeCreated", + ): + v = getattr(o, attr, None) + if v is not None: + times.append(v) + if not times: + d = _to_dict(o) + for k in ("timeEnded", "timeStarted", "timeCreated"): + if d.get(k) is not None: + times.append(d[k]) + return times + + items: list[ProtectedDatabaseBackupDestinationItem] = [] + counts_by_type: dict[str, int] = {} + db_names_by_type: dict[str, list[str]] = {} + unconfigured = 0 + unconfigured_names: list[str] = [] + has_backups_names: list[str] = [] + + get_db = db_client.get_database + list_bk = db_client.list_backups + + for s in db_summaries: + try: + sid = _get(s, "id") + if not sid: + continue + db_name = _get(s, "db_name", "dbName") + + dresp = get_db(database_id=sid) + d_obj = getattr(dresp, "data", None) + d_dict = _to_dict(d_obj) + + dest_details = _extract_backup_destination_details(d_dict) + dest_types: list[str] = [] + dest_ids: list[str] = [] + for det in dest_details: + dd = det if isinstance(det, dict) else _to_dict(det) + t_norm = _normalize_dest_type( + dd.get("type") or dd.get("destinationType") + ) + did = ( + dd.get("id") + or dd.get("backupDestinationId") + or dd.get("destinationId") + ) + if t_norm: + dest_types.append(t_norm) + if did: + dest_ids.append(did) + + # Augment with Recovery Service protected database linkage + pds_for_db = pd_by_dbid.get(sid, []) + if pds_for_db: + dest_types.append("DBRS") + try: + # Use PD OCID for reference if present + dest_ids.append(pds_for_db[0].get("id")) + except Exception: + pass + + # Deduplicate + dest_types = list(dict.fromkeys([t for t in dest_types if t])) + # Enforce exclusivity between DBRS and OSS: + # prefer DBRS (no dual classification) + if "DBRS" in dest_types and "OSS" in dest_types: + dest_types = ["DBRS"] + dest_ids = list(dict.fromkeys([d for d in dest_ids if d])) + + auto_enabled = _is_auto_backup_enabled(d_dict) + # Consider configured if auto-backup enabled OR any destination types + # detected (incl. 
DBRS via Recovery Service) + configured = bool(auto_enabled or len(dest_types) > 0) + status = "CONFIGURED" if configured else "UNCONFIGURED" + last_backup_time = None + + if include_last_backup_time: + try: + b_resp = list_bk(database_id=sid) + b_data = getattr(b_resp.data, "items", b_resp.data) + backups = ( + b_data + if isinstance(b_data, list) + else [b_data] if b_data is not None else [] + ) + best = None + for b in backups: + for t in _read_backup_times_from_obj(b): + if best is None or (str(t) > str(best)): + best = t + if best is not None: + last_backup_time = best + if status != "CONFIGURED": + status = "HAS_BACKUPS" + except Exception: + pass + else: + try: + b_resp = list_bk(database_id=sid, limit=1) + b_data = getattr(b_resp.data, "items", b_resp.data) + has_any = ( + (len(b_data) > 0) + if isinstance(b_data, list) + else (b_data is not None) + ) + if status != "CONFIGURED" and has_any: + status = "HAS_BACKUPS" + except Exception: + pass + + name_for_lists = db_name or sid + if status == "CONFIGURED": + for ut in set(dest_types): + if ut != "UNKNOWN": + counts_by_type[ut] = counts_by_type.get(ut, 0) + 1 + db_names_by_type.setdefault(ut, []).append(name_for_lists) + elif status == "HAS_BACKUPS": + has_backups_names.append(name_for_lists) + else: + unconfigured += 1 + unconfigured_names.append(name_for_lists) + + items.append( + ProtectedDatabaseBackupDestinationItem( + database_id=sid, + db_name=db_name, + status=status, + destination_types=dest_types, + destination_ids=dest_ids, + last_backup_time=last_backup_time, + ) + ) + except Exception: + continue + + def _dest_rank(types: list[str]) -> int: + if not types: + return 99 + order = {"DBRS": 0, "OSS": 1, "NFS": 2, "UNKNOWN": 3} + return min(order.get(t, 3) for t in types) + + def _status_rank(st: Optional[str]) -> int: + return {"CONFIGURED": 0, "HAS_BACKUPS": 1, "UNCONFIGURED": 2}.get( + (st or "").upper(), 3 + ) + + items = sorted( + items, + key=lambda it: ( + _dest_rank(it.destination_types), + _status_rank(it.status), + (it.db_name or ""), + ), + ) + + def _uniq_sorted(xs: list[str]) -> list[str]: + return sorted(dict.fromkeys([x for x in xs if x])) + + # Preserve duplicates for name lists that can correspond to different DB OCIDs + def _sorted_keep(xs: list[str]) -> list[str]: + return sorted([x for x in xs if x]) + + db_names_by_type = {k: _sorted_keep(v) for k, v in db_names_by_type.items()} + unconfigured_names = _uniq_sorted(unconfigured_names) + has_backups_names = _uniq_sorted(has_backups_names) + + return ProtectedDatabaseBackupDestinationSummary( + compartment_id=compartment_id, + region=region, + total_databases=len(db_summaries), + unconfigured_count=unconfigured, + counts_by_destination_type=counts_by_type, + db_names_by_destination_type=db_names_by_type, + unconfigured_db_names=unconfigured_names, + has_backups_db_names=has_backups_names, + items=items, + ) + except Exception as e: + logger.error( + f"Error in summarise_protected_database_backup_destination tool: {e}" + ) + raise + + +def list_db_homes( + compartment_id: Annotated[ + Optional[str], "OCID of the compartment to scope the search." + ] = None, + db_system_id: Annotated[ + Optional[str], "The OCID of the Exadata DB system to filter the DB homes by." + ] = None, + limit: Annotated[Optional[int], "Maximum number of items per page."] = None, + page: Annotated[Optional[str], "Pagination token (opc-next-page)."] = None, + region: Annotated[ + Optional[str], "Canonical OCI region (e.g., us-ashburn-1)." 
+ ] = None, +) -> list[DatabaseHomeSummary]: + try: + client = get_database_client(region) + if not compartment_id and not db_system_id: + compartment_id = get_tenancy() + results: list[DatabaseHomeSummary] = [] + has_next = True + next_page = page + while has_next: + kwargs: dict = {"page": next_page} + if compartment_id: + kwargs["compartment_id"] = compartment_id + if db_system_id: + kwargs["db_system_id"] = db_system_id + if limit is not None: + kwargs["limit"] = limit + resp = client.list_db_homes(**kwargs) + data = getattr(resp.data, "items", resp.data) + for it in data or []: + m = map_database_home_summary(it) + if m is not None: + results.append(m) + has_next = resp.has_next_page + next_page = resp.next_page if hasattr(resp, "next_page") else None + return results + except Exception as e: + logger.error(f"Error in list_db_homes tool: {e}") + raise + + +@mcp.tool(description="Retrieves a single Database Home by OCID.") +def get_db_home( + db_home_id: Annotated[str, "OCID of the DB Home to retrieve."], + region: Annotated[ + Optional[str], "Canonical OCI region (e.g., us-ashburn-1)." + ] = None, +) -> DatabaseHome: + try: + client = get_database_client(region) + resp = client.get_db_home(db_home_id=db_home_id) + return map_database_home(resp.data) + except Exception as e: + logger.error(f"Error in get_db_home tool: {e}") + raise + + +@mcp.tool( + description=( + "Lists Database Systems in the specified compartment with optional lifecycle filters. " + "If compartment_id is omitted, defaults to tenancy compartment." + ) +) +def list_db_systems( + compartment_id: Annotated[ + Optional[str], "OCID of the compartment to scope the search." + ] = None, + lifecycle_state: Annotated[Optional[str], "Filter by lifecycle state."] = None, + limit: Annotated[Optional[int], "Maximum number of items per page."] = None, + page: Annotated[Optional[str], "Pagination token (opc-next-page)."] = None, + region: Annotated[ + Optional[str], "Canonical OCI region (e.g., us-ashburn-1)." + ] = None, +) -> list[DbSystemSummary]: + try: + client = get_database_client(region) + if not compartment_id: + compartment_id = get_tenancy() + results: list[DbSystemSummary] = [] + has_next = True + next_page = page + while has_next: + kwargs: dict = {"page": next_page} + if compartment_id: + kwargs["compartment_id"] = compartment_id + if lifecycle_state: + kwargs["lifecycle_state"] = lifecycle_state + if limit is not None: + kwargs["limit"] = limit + resp = client.list_db_systems(**kwargs) + data = getattr(resp.data, "items", resp.data) + for it in data or []: + m = map_db_system_summary(it) + if m is not None: + results.append(m) + has_next = resp.has_next_page + next_page = resp.next_page if hasattr(resp, "next_page") else None + return results + except Exception as e: + logger.error(f"Error in list_db_systems tool: {e}") + raise + + +@mcp.tool(description="Retrieves a single Database System by OCID.") +def get_db_system( + db_system_id: Annotated[str, "OCID of the DB System to retrieve."], + region: Annotated[ + Optional[str], "Canonical OCI region (e.g., us-ashburn-1)." 
+ ] = None, +) -> DbSystem: + try: + client = get_database_client(region) + resp = client.get_db_system(db_system_id=db_system_id) + return map_db_system(resp.data) + except Exception as e: + logger.error(f"Error in get_db_system tool: {e}") + raise + + def main(): host = os.getenv("ORACLE_MCP_HOST") port = os.getenv("ORACLE_MCP_PORT") + # Log startup and where logs are written + base_dir = os.path.abspath( + os.path.join(os.path.dirname(__file__), "..", "..", "..") + ) + log_dir = os.getenv("ORACLE_MCP_LOG_DIR", os.path.join(base_dir, "logs")) + log_file = os.getenv( + "ORACLE_MCP_LOG_FILE", os.path.join(log_dir, "oci_recovery_mcp_server.log") + ) + logger.info("Starting %s v%s", __project__, __version__) + logger.info("Logs will be written to: %s", os.path.abspath(log_file)) + if host and port: + logger.info("Running FastMCP over HTTP at http://%s:%s", host, port) mcp.run(transport="http", host=host, port=int(port)) else: + logger.info("Running FastMCP over stdio transport") mcp.run() From fb9f5f0e123f8c93c7e959a00e382468f4245082 Mon Sep 17 00:00:00 2001 From: hagavisi Date: Sun, 28 Dec 2025 14:53:38 +0530 Subject: [PATCH 03/11] Add tests for the tools --- .../oracle/oci_recovery_mcp_server/server.py | 16 +- .../tests/test_recovery_database_tools.py | 158 +++++++ .../tests/test_recovery_tools.py | 444 ++++++++++++++++++ 3 files changed, 614 insertions(+), 4 deletions(-) create mode 100644 src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_database_tools.py create mode 100644 src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_tools.py diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py index 2ca4d6d1..2a86f28a 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py @@ -320,8 +320,11 @@ def get_compartment_by_name_tool(name: str) -> str: @mcp.tool( - description="List Protected Databases in a given compartment with optional filters." -) + description="List Protected Databases in a given compartment with optional filters." \ + "Response includes key information of the database it is protecting such as " \ + "database ocid, dbuniquename of the database , vpcuser etc ." \ + "Response also includes other details specific to protected databases resource." + ) def list_protected_databases( compartment_id: Annotated[str, "The OCID of the compartment"], lifecycle_state: Annotated[ @@ -1384,8 +1387,13 @@ def list_backups( m = map_backup_summary(it) if m is not None: results.append(m) - has_next = resp.has_next_page - next_page = resp.next_page if hasattr(resp, "next_page") else None + # Robust pagination guard: only continue if has_next_page is explicitly True + # and a concrete next_page token is present. This avoids infinite loops when + # tests use MagicMock/auto-specs that return truthy Mock objects. 
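+        #
+        # A minimal illustrative sketch of that failure mode (hypothetical,
+        # stdlib-only; shown for orientation, not executed by the server):
+        #
+        #     from unittest.mock import MagicMock
+        #     resp = MagicMock()
+        #     bool(resp.has_next_page)       # True -> a naive `while` never ends
+        #     isinstance(resp.has_next_page, bool) and resp.has_next_page
+        #                                    # False -> the guard stops paginating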
+ _has_next_attr = getattr(resp, "has_next_page", False) + _next_page_attr = getattr(resp, "next_page", None) + has_next = (isinstance(_has_next_attr, bool) and _has_next_attr) and bool(_next_page_attr) + next_page = _next_page_attr if has_next else None return results except Exception as e: logger.error(f"Error in list_backups tool: {e}") diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_database_tools.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_database_tools.py new file mode 100644 index 00000000..9b8baf06 --- /dev/null +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_database_tools.py @@ -0,0 +1,158 @@ +""" +Copyright (c) 2025, Oracle and/or its affiliates. +Licensed under the Universal Permissive License v1.0 as shown at +https://oss.oracle.com/licenses/upl. +""" + +from types import SimpleNamespace +from unittest.mock import MagicMock, create_autospec, patch + +import oci +import pytest +from fastmcp import Client +from oracle.oci_recovery_mcp_server.server import mcp + + +class TestRecoveryDatabaseTools: + @pytest.mark.asyncio + @patch("oracle.oci_recovery_mcp_server.server.get_database_client") + async def test_list_databases(self, mock_get_db_client): + mock_client = MagicMock() + mock_get_db_client.return_value = mock_client + + # list_databases() returns a Response with .data.items + list_resp = create_autospec(oci.response.Response) + list_resp.data = SimpleNamespace(items=[{"id": "db1", "db_name": "DB1"}]) + mock_client.list_databases.return_value = list_resp + + # Enrichment path: get_database() to fill db_backup_config if missing + get_resp = create_autospec(oci.response.Response) + get_resp.data = { + "id": "db1", + "db_backup_config": {"is_auto_backup_enabled": True}, + } + mock_client.get_database.return_value = get_resp + + async with Client(mcp) as client: + call = await client.call_tool("list_databases", {"db_home_id": "home1"}) + result = call.structured_content["result"] + + assert isinstance(result, list) + assert len(result) == 1 + assert result[0]["id"] == "db1" + # db_backup_config should be present after enrichment + assert "db_backup_config" in result[0] + + @pytest.mark.asyncio + @patch("oracle.oci_recovery_mcp_server.server.get_recovery_client") + @patch("oracle.oci_recovery_mcp_server.server.get_database_client") + async def test_get_database_sets_protection_policy( + self, mock_get_db_client, mock_get_rec_client + ): + db_client = MagicMock() + rec_client = MagicMock() + mock_get_db_client.return_value = db_client + mock_get_rec_client.return_value = rec_client + + # DB fetch with compartment_id + db_resp = create_autospec(oci.response.Response) + db_resp.data = {"id": "db1", "compartment_id": "ocid1.compartment.oc1..comp"} + db_client.get_database.return_value = db_resp + + # Recovery PD listing returns item with databaseId and protectionPolicyId + pd_list_resp = create_autospec(oci.response.Response) + pd_list_resp.has_next_page = False + pd_list_resp.next_page = None + pd_list_resp.data = SimpleNamespace( + items=[{"databaseId": "db1", "protectionPolicyId": "pp1"}] + ) + rec_client.list_protected_databases.return_value = pd_list_resp + + async with Client(mcp) as client: + call = await client.call_tool("get_database", {"database_id": "db1"}) + result = call.structured_content + assert result["id"] == "db1" + # Enriched field from correlation loop + assert result.get("protection_policy_id") == "pp1" + + @pytest.mark.asyncio + 
@patch("oracle.oci_recovery_mcp_server.server.get_database_client") + async def test_list_backups(self, mock_get_db_client): + mock_client = MagicMock() + mock_get_db_client.return_value = mock_client + + list_resp = create_autospec(oci.response.Response) + list_resp.data = SimpleNamespace(items=[{"id": "b1", "database_id": "db1"}]) + mock_client.list_backups.return_value = list_resp + + async with Client(mcp) as client: + call = await client.call_tool("list_backups", {}) + result = call.structured_content["result"] + assert isinstance(result, list) + assert len(result) == 1 + assert result[0]["id"] == "b1" + + @pytest.mark.asyncio + @patch("oracle.oci_recovery_mcp_server.server.get_database_client") + async def test_get_backup(self, mock_get_db_client): + mock_client = MagicMock() + mock_get_db_client.return_value = mock_client + + get_resp = create_autospec(oci.response.Response) + get_resp.data = {"id": "b1", "database_id": "db1"} + mock_client.get_backup.return_value = get_resp + + async with Client(mcp) as client: + call = await client.call_tool("get_backup", {"backup_id": "b1"}) + result = call.structured_content + assert result["id"] == "b1" + + @pytest.mark.asyncio + @patch("oracle.oci_recovery_mcp_server.server._fetch_db_home_ids_for_compartment") + @patch("oracle.oci_recovery_mcp_server.server.get_recovery_client") + @patch("oracle.oci_recovery_mcp_server.server.get_database_client") + async def test_summarise_protected_database_backup_destination_dbrs_configured( + self, mock_get_db_client, mock_get_rec_client, mock_fetch_homes + ): + mock_fetch_homes.return_value = ["home1"] + + db_client = MagicMock() + rec_client = MagicMock() + mock_get_db_client.return_value = db_client + mock_get_rec_client.return_value = rec_client + + # One database summary in the discovered DB Home + list_db_resp = create_autospec(oci.response.Response) + list_db_resp.data = SimpleNamespace(items=[{"id": "dbA", "db_name": "DBA"}]) + db_client.list_databases.return_value = list_db_resp + + # Full DB details (no backup config required to still detect DBRS) + get_db_resp = create_autospec(oci.response.Response) + get_db_resp.data = {"id": "dbA"} + db_client.get_database.return_value = get_db_resp + + # Minimal non-empty backup list for the quick "has backups" probe path + list_bk_resp = create_autospec(oci.response.Response) + list_bk_resp.data = SimpleNamespace(items=[{"id": "bk1"}]) + db_client.list_backups.return_value = list_bk_resp + + # Protected databases listing correlates dbA -> implies DBRS destination + pd_list_resp = create_autospec(oci.response.Response) + pd_list_resp.has_next_page = False + pd_list_resp.next_page = None + pd_list_resp.data = SimpleNamespace(items=[{"id": "pd1", "databaseId": "dbA"}]) + rec_client.list_protected_databases.return_value = pd_list_resp + + async with Client(mcp) as client: + call = await client.call_tool( + "summarise_protected_database_backup_destination", + {"compartment_id": "ocid1.compartment.oc1..comp"}, + ) + result = call.structured_content + + # Expect 1 total DB, configured under DBRS + assert result["total_databases"] == 1 + counts = result["counts_by_destination_type"] + assert isinstance(counts, dict) + # DBRS should be detected due to Protected Database correlation + assert counts.get("DBRS", 0) >= 1 diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_tools.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_tools.py new file mode 100644 index 00000000..4b5868d7 --- /dev/null 
+++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_tools.py @@ -0,0 +1,444 @@ +""" +Copyright (c) 2025, Oracle and/or its affiliates. +Licensed under the Universal Permissive License v1.0 as shown at +https://oss.oracle.com/licenses/upl. +""" + +from types import SimpleNamespace +from unittest.mock import MagicMock, create_autospec, patch + +import oci +import pytest +from fastmcp import Client +from oracle.oci_recovery_mcp_server.server import mcp + + +class TestRecoveryTools: + @pytest.mark.asyncio + @patch("oracle.oci_recovery_mcp_server.server.get_recovery_client") + async def test_list_protected_databases(self, mock_get_client): + mock_client = MagicMock() + mock_get_client.return_value = mock_client + + # Mock list response with a single ProtectedDatabaseSummary + mock_list_response = create_autospec(oci.response.Response) + mock_list_response.data = [ + oci.recovery.models.ProtectedDatabaseSummary( + id="pd1", + display_name="Protected DB 1", + lifecycle_state="ACTIVE", + ) + ] + mock_list_response.has_next_page = False + mock_list_response.next_page = None + mock_client.list_protected_databases.return_value = mock_list_response + # attach metrics at summary level to ensure fallback path covers + mock_list_response.data[0].metrics = {"backup_space_used_in_gbs": 10.5} + + async with Client(mcp) as client: + call_tool_result = await client.call_tool( + "list_protected_databases", + {"compartment_id": "ocid1.compartment.oc1..test"}, + ) + result = call_tool_result.structured_content["result"] + + assert len(result) == 1 + assert result[0]["id"] == "pd1" + assert result[0]["display_name"] == "Protected DB 1" + + @pytest.mark.asyncio + @patch("oracle.oci_recovery_mcp_server.server.get_recovery_client") + async def test_get_protected_database(self, mock_get_client): + mock_client = MagicMock() + mock_get_client.return_value = mock_client + + # Mock get response with a ProtectedDatabase + mock_get_response = create_autospec(oci.response.Response) + pd = oci.recovery.models.ProtectedDatabase( + id="pd1", + display_name="Protected DB 1", + lifecycle_state="ACTIVE", + health="PROTECTED", + ) + # attach minimal metrics for mapping tolerance + pd.metrics = {"backup_space_used_in_gbs": 12.5} + mock_get_response.data = pd + mock_client.get_protected_database.return_value = mock_get_response + + async with Client(mcp) as client: + call_tool_result = await client.call_tool( + "get_protected_database", {"protected_database_id": "pd1"} + ) + result = call_tool_result.structured_content + + assert result["id"] == "pd1" + assert result["health"] == "PROTECTED" + + @pytest.mark.asyncio + @patch("oracle.oci_recovery_mcp_server.server.get_recovery_client") + async def test_list_protection_policies(self, mock_get_client): + mock_client = MagicMock() + mock_get_client.return_value = mock_client + + mock_list_response = create_autospec(oci.response.Response) + mock_list_response.data = [ + oci.recovery.models.ProtectionPolicySummary( + id="pp1", + display_name="Policy 1", + lifecycle_state="ACTIVE", + ) + ] + mock_list_response.has_next_page = False + mock_list_response.next_page = None + mock_client.list_protection_policies.return_value = mock_list_response + + async with Client(mcp) as client: + call_tool_result = await client.call_tool( + "list_protection_policies", + {"compartment_id": "ocid1.compartment.oc1..test"}, + ) + result = call_tool_result.structured_content["result"] + + assert len(result) == 1 + assert result[0]["id"] == "pp1" + assert 
result[0]["display_name"] == "Policy 1" + + @pytest.mark.asyncio + @patch("oracle.oci_recovery_mcp_server.server.get_recovery_client") + async def test_get_protection_policy(self, mock_get_client): + mock_client = MagicMock() + mock_get_client.return_value = mock_client + + mock_get_response = create_autospec(oci.response.Response) + mock_get_response.data = oci.recovery.models.ProtectionPolicy( + id="pp1", + display_name="Policy 1", + lifecycle_state="ACTIVE", + ) + mock_client.get_protection_policy.return_value = mock_get_response + + async with Client(mcp) as client: + call_tool_result = await client.call_tool( + "get_protection_policy", {"protection_policy_id": "pp1"} + ) + result = call_tool_result.structured_content + + assert result["id"] == "pp1" + assert result["display_name"] == "Policy 1" + + @pytest.mark.asyncio + @patch("oracle.oci_recovery_mcp_server.server.get_recovery_client") + async def test_list_recovery_service_subnets(self, mock_get_client): + mock_client = MagicMock() + mock_get_client.return_value = mock_client + + mock_list_response = create_autospec(oci.response.Response) + mock_list_response.data = [ + oci.recovery.models.RecoveryServiceSubnetSummary( + id="rss1", + display_name="RSS 1", + lifecycle_state="ACTIVE", + ) + ] + mock_list_response.has_next_page = False + mock_list_response.next_page = None + mock_client.list_recovery_service_subnets.return_value = mock_list_response + + async with Client(mcp) as client: + call_tool_result = await client.call_tool( + "list_recovery_service_subnets", + {"compartment_id": "ocid1.compartment.oc1..test"}, + ) + result = call_tool_result.structured_content["result"] + + assert len(result) == 1 + assert result[0]["id"] == "rss1" + assert result[0]["display_name"] == "RSS 1" + + @pytest.mark.asyncio + @patch("oracle.oci_recovery_mcp_server.server.get_recovery_client") + async def test_get_recovery_service_subnet(self, mock_get_client): + mock_client = MagicMock() + mock_get_client.return_value = mock_client + + mock_get_response = create_autospec(oci.response.Response) + mock_get_response.data = oci.recovery.models.RecoveryServiceSubnet( + id="rss1", + display_name="RSS 1", + lifecycle_state="ACTIVE", + ) + mock_client.get_recovery_service_subnet.return_value = mock_get_response + + async with Client(mcp) as client: + call_tool_result = await client.call_tool( + "get_recovery_service_subnet", {"recovery_service_subnet_id": "rss1"} + ) + result = call_tool_result.structured_content + + assert result["id"] == "rss1" + assert result["display_name"] == "RSS 1" + + @pytest.mark.asyncio + @patch("oracle.oci_recovery_mcp_server.server.get_tenancy") + @patch("oracle.oci_recovery_mcp_server.server.get_recovery_client") + async def test_summarize_protected_database_health( + self, mock_get_client, mock_get_tenancy + ): + mock_get_tenancy.return_value = "ocid1.compartment.oc1..test" + + mock_client = MagicMock() + mock_get_client.return_value = mock_client + + # list two PDs + mock_list_response = create_autospec(oci.response.Response) + mock_list_response.data = [ + oci.recovery.models.ProtectedDatabaseSummary(id="pd1"), + oci.recovery.models.ProtectedDatabaseSummary(id="pd2"), + ] + mock_list_response.has_next_page = False + mock_list_response.next_page = None + mock_client.list_protected_databases.return_value = mock_list_response + + # get each with different health + mock_get_pd_resp1 = create_autospec(oci.response.Response) + mock_get_pd_resp1.data = oci.recovery.models.ProtectedDatabase( + id="pd1", health="PROTECTED" + ) + 
mock_get_pd_resp2 = create_autospec(oci.response.Response) + mock_get_pd_resp2.data = oci.recovery.models.ProtectedDatabase( + id="pd2", health="WARNING" + ) + mock_client.get_protected_database.side_effect = [ + mock_get_pd_resp1, + mock_get_pd_resp2, + ] + + async with Client(mcp) as client: + call_tool_result = await client.call_tool( + "summarize_protected_database_health", + {"compartment_id": "ocid1.compartment.oc1..test"}, + ) + result = call_tool_result.structured_content + + assert result["protected"] == 1 + assert result["warning"] == 1 + assert result["alert"] == 0 + assert result["total"] == 2 + + @pytest.mark.asyncio + @patch("oracle.oci_recovery_mcp_server.server.get_tenancy") + @patch("oracle.oci_recovery_mcp_server.server.get_recovery_client") + async def test_summarize_protected_database_redo_status( + self, mock_get_client, mock_get_tenancy + ): + mock_get_tenancy.return_value = "ocid1.compartment.oc1..test" + + mock_client = MagicMock() + mock_get_client.return_value = mock_client + + mock_list_response = create_autospec(oci.response.Response) + mock_list_response.data = [ + oci.recovery.models.ProtectedDatabaseSummary(id="pd1"), + oci.recovery.models.ProtectedDatabaseSummary(id="pd2"), + ] + mock_list_response.has_next_page = False + mock_list_response.next_page = None + mock_client.list_protected_databases.return_value = mock_list_response + + # get PDs with redo shipped enabled/disabled + pd1 = oci.recovery.models.ProtectedDatabase(id="pd1") + pd1.is_redo_logs_shipped = True + pd2 = oci.recovery.models.ProtectedDatabase(id="pd2") + pd2.is_redo_logs_shipped = False + mock_get_pd_resp1 = create_autospec(oci.response.Response) + mock_get_pd_resp1.data = pd1 + mock_get_pd_resp2 = create_autospec(oci.response.Response) + mock_get_pd_resp2.data = pd2 + mock_client.get_protected_database.side_effect = [ + mock_get_pd_resp1, + mock_get_pd_resp2, + ] + + async with Client(mcp) as client: + call_tool_result = await client.call_tool( + "summarize_protected_database_redo_status", + {"compartment_id": "ocid1.compartment.oc1..test"}, + ) + result = call_tool_result.structured_content + + assert result["enabled"] == 1 + assert result["disabled"] == 1 + assert result["total"] == 2 + + @pytest.mark.asyncio + @patch("oracle.oci_recovery_mcp_server.server.get_tenancy") + @patch("oracle.oci_recovery_mcp_server.server.get_recovery_client") + async def test_summarize_backup_space_used(self, mock_get_client, mock_get_tenancy): + mock_get_tenancy.return_value = "ocid1.compartment.oc1..test" + + mock_client = MagicMock() + mock_get_client.return_value = mock_client + + mock_list_response = create_autospec(oci.response.Response) + mock_list_response.data = [ + oci.recovery.models.ProtectedDatabaseSummary(id="pd1"), + oci.recovery.models.ProtectedDatabaseSummary(id="pd2"), + ] + mock_list_response.has_next_page = False + mock_list_response.next_page = None + mock_client.list_protected_databases.return_value = mock_list_response + # Fallback path for metrics at summary level + mock_list_response.data[0].metrics = {"backup_space_used_in_gbs": 10.5} + mock_list_response.data[1].metrics = {"backup_space_used_in_gbs": 4.5} + + # PD1 metrics 10.5 GB, PD2 metrics 4.5 GB + pd1 = oci.recovery.models.ProtectedDatabase(id="pd1") + pd1.metrics = {"backup_space_used_in_gbs": 10.5} + pd2 = oci.recovery.models.ProtectedDatabase(id="pd2") + pd2.metrics = {"backup_space_used_in_gbs": 4.5} + + mock_get_pd_resp1 = create_autospec(oci.response.Response) + mock_get_pd_resp1.data = pd1 + mock_get_pd_resp2 = 
create_autospec(oci.response.Response) + mock_get_pd_resp2.data = pd2 + mock_client.get_protected_database.side_effect = [ + mock_get_pd_resp1, + mock_get_pd_resp2, + ] + + async with Client(mcp) as client: + call_tool_result = await client.call_tool( + "summarize_backup_space_used", + {"compartment_id": "ocid1.compartment.oc1..test"}, + ) + result = call_tool_result.structured_content + + total_scanned = result.get("total_databases_scanned") or result.get( + "totalDatabasesScanned" + ) + sum_gb = result.get("sum_backup_space_used_in_gbs") or result.get( + "sumBackupSpaceUsedInGBs" + ) + assert abs(sum_gb - 15.0) < 1e-9 + assert total_scanned is None or total_scanned >= 0 + + @pytest.mark.asyncio + @patch("oracle.oci_recovery_mcp_server.server.get_monitoring_client") + async def test_get_recovery_service_metrics(self, mock_get_monitoring_client): + mock_client = MagicMock() + mock_get_monitoring_client.return_value = mock_client + + # Prepare a fake series with aggregated datapoints + dp1 = SimpleNamespace(timestamp="2024-01-01T00:00:00Z", value=1.0) + dp2 = SimpleNamespace(timestamp="2024-01-01T00:01:00Z", value=2.0) + series = SimpleNamespace( + dimensions={"resourceId": "pd1"}, aggregated_datapoints=[dp1, dp2] + ) + + mock_metrics_response = create_autospec(oci.response.Response) + mock_metrics_response.data = [series] + mock_client.summarize_metrics_data.return_value = mock_metrics_response + + async with Client(mcp) as client: + call_tool_result = await client.call_tool( + "get_recovery_service_metrics", + { + "compartment_id": "ocid1.compartment.oc1..test", + "start_time": "2024-01-01T00:00:00Z", + "end_time": "2024-01-01T00:05:00Z", + "metricName": "SpaceUsedForRecoveryWindow", + "resolution": "1m", + "aggregation": "mean", + "protected_database_id": "pd1", + }, + ) + result = call_tool_result.structured_content["result"] + + assert isinstance(result, list) + assert len(result) == 1 + assert result[0]["dimensions"]["resourceId"] == "pd1" + assert len(result[0]["datapoints"]) == 2 + assert result[0]["datapoints"][0]["value"] == 1.0 + + @pytest.mark.asyncio + @patch("oracle.oci_recovery_mcp_server.server.list_all_compartments_internal") + async def test_get_compartment_by_name_tool(self, mock_list_all): + class DummyCompartment: + def __init__(self, name, id): + self.name = name + self.id = id + + def __str__(self): + return f"Compartment(name={self.name}, id={self.id})" + + comps = [ + DummyCompartment("Other", "ocid1.compartment.oc1..other"), + DummyCompartment("Target", "ocid1.compartment.oc1..target"), + ] + mock_list_all.return_value = comps + + async with Client(mcp) as client: + call_tool_result = await client.call_tool( + "get_compartment_by_name_tool", {"name": "Target"} + ) + result = call_tool_result.structured_content["result"] + assert "Target" in result + assert "ocid1.compartment.oc1..target" in result + + +class TestServer: + @patch("oracle.oci_recovery_mcp_server.server.mcp.run") + @patch("os.getenv") + def test_main_with_host_and_port(self, mock_getenv, mock_mcp_run): + mock_env = { + "ORACLE_MCP_HOST": "127.0.0.1", + "ORACLE_MCP_PORT": "8080", + } + # Return configured values for known keys, and default for others + mock_getenv.side_effect = lambda k, d=None: mock_env.get(k, d) + + import oracle.oci_recovery_mcp_server.server as server + + server.main() + mock_mcp_run.assert_called_once_with( + transport="http", + host=mock_env["ORACLE_MCP_HOST"], + port=int(mock_env["ORACLE_MCP_PORT"]), + ) + + @patch("oracle.oci_recovery_mcp_server.server.mcp.run") + 
@patch("os.getenv") + def test_main_without_host_and_port(self, mock_getenv, mock_mcp_run): + # Return None for host/port keys, otherwise pass through default (for log dir/file) + mock_getenv.side_effect = ( + lambda k, d=None: None + if k in ("ORACLE_MCP_HOST", "ORACLE_MCP_PORT") + else d + ) + + import oracle.oci_recovery_mcp_server.server as server + + server.main() + mock_mcp_run.assert_called_once_with() + + @patch("oracle.oci_recovery_mcp_server.server.mcp.run") + @patch("os.getenv") + def test_main_with_only_host(self, mock_getenv, mock_mcp_run): + mock_env = {"ORACLE_MCP_HOST": "127.0.0.1"} + mock_getenv.side_effect = lambda k, d=None: mock_env.get(k, d) + + import oracle.oci_recovery_mcp_server.server as server + + server.main() + mock_mcp_run.assert_called_once_with() + + @patch("oracle.oci_recovery_mcp_server.server.mcp.run") + @patch("os.getenv") + def test_main_with_only_port(self, mock_getenv, mock_mcp_run): + mock_env = {"ORACLE_MCP_PORT": "8080"} + mock_getenv.side_effect = lambda k, d=None: mock_env.get(k, d) + + import oracle.oci_recovery_mcp_server.server as server + + server.main() + mock_mcp_run.assert_called_once_with() From daa2d8ce68014d8635aee1866429d3e558e5ed68 Mon Sep 17 00:00:00 2001 From: hagavisi Date: Sun, 28 Dec 2025 18:52:13 +0530 Subject: [PATCH 04/11] Add comments to the code --- .../oracle/oci_recovery_mcp_server/server.py | 104 ++++++++++++++++-- 1 file changed, 97 insertions(+), 7 deletions(-) diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py index 2a86f28a..12662b68 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py @@ -4,6 +4,33 @@ https://oss.oracle.com/licenses/upl. """ +# Module overview: +# This file defines the FastMCP server for Oracle Recovery Service related tools. +# It wires up: +# - Logging (file + optional console with rotation) +# - OCI client factories (Recovery, Identity, Database, Monitoring) +# - Helper utilities (tenancy/compartment discovery, DB Home discovery) +# - A set of MCP tools (decorated functions) that call OCI SDKs, paginate responses, +# and map SDK models into server-specific dataclasses found in models.py. +# +# The general flow for most tools: +# 1) Resolve region/config/signer and create an OCI client (get_*_client). +# 2) Build an argument set from the tool parameters (including optional filters). +# 3) Call the appropriate OCI API, handling pagination where required. +# 4) Map SDK responses to the server's typed models (map_* functions). +# 5) Return typed results (summaries/objects) or computed aggregations. +# +# Main() chooses the transport: +# - If ORACLE_MCP_HOST and ORACLE_MCP_PORT are set: run HTTP transport. +# - Otherwise run stdio transport (default for MCP). +# +# Important robustness choices: +# - We add an "additional_user_agent" string to all OCI client configs for traceability. +# - We sign requests with a SecurityTokenSigner using the configured security token file. +# - We try to be resilient to SDK shape differences by using getattr/__dict__/to_dict +# wherever possible, especially for pagination and nested model fields. +# - We log key milestones and counts for better operability and diagnostics. 
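+#
+# A condensed sketch of that five-step flow (hypothetical tool shown only for
+# orientation; `map_summary` stands in for the concrete map_* helpers defined
+# in models.py, the other names mirror helpers in this module):
+#
+#     def example_list_tool(compartment_id: str, region: str | None = None):
+#         client = get_recovery_client(region)            # 1) client + signer
+#         results, next_page = [], None                   # 2) request args
+#         while True:
+#             resp = client.list_protected_databases(     # 3) call + paginate
+#                 compartment_id=compartment_id, page=next_page
+#             )
+#             items = getattr(resp.data, "items", resp.data)
+#             results.extend(map_summary(i) for i in items or [])   # 4) map
+#             if not (resp.has_next_page and getattr(resp, "next_page", None)):
+#                 return results                          # 5) typed results
+#             next_page = resp.next_page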
+ import json import logging import os @@ -79,9 +106,11 @@ # Logging setup def setup_logging(): + # Resolve log level from env, default to INFO level_name = os.getenv("ORACLE_MCP_LOG_LEVEL", "INFO").upper() level = getattr(logging, level_name, logging.INFO) + # Compute default log dir relative to project root; allow env override base_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), "..", "..", "..") ) @@ -91,6 +120,7 @@ def setup_logging(): "ORACLE_MCP_LOG_FILE", os.path.join(log_dir, "oci_recovery_mcp_server.log") ) + # Configure root logger once root_logger = logging.getLogger() root_logger.setLevel(level) @@ -139,6 +169,7 @@ def setup_logging(): setup_logging() logger = logging.getLogger(__name__) +# Create the FastMCP app that exposes the functions decorated with @mcp.tool mcp = FastMCP(name=__project__) @@ -150,18 +181,21 @@ def get_recovery_client( Adds a custom user agent derived from the package name and version. Optionally overrides the region. """ + # Load config (profile or DEFAULT) and tag requests with additional user agent config = oci.config.from_file( profile_name=os.getenv("OCI_CONFIG_PROFILE", oci.config.DEFAULT_PROFILE) ) user_agent_name = __project__.split("oracle.", 1)[1].split("-server", 1)[0] config["additional_user_agent"] = f"{user_agent_name}/{__version__}" + # Build SecurityTokenSigner from configured key + token private_key = oci.signer.load_private_key_from_file(config["key_file"]) token_file = config["security_token_file"] with open(token_file, "r") as f: token = f.read() signer = oci.auth.signers.SecurityTokenSigner(token, private_key) + # Region-aware client construction if region is None: return oci.recovery.DatabaseRecoveryClient(config, signer=signer) @@ -171,6 +205,7 @@ def get_recovery_client( def get_identity_client(): + # Create Identity client (for compartment/tenancy queries) with same UA and signer setup config = oci.config.from_file( profile_name=os.getenv("OCI_CONFIG_PROFILE", oci.config.DEFAULT_PROFILE) ) @@ -185,6 +220,7 @@ def get_identity_client(): def get_database_client(region: str = None): + # Create Database Service client (DB Home/DB/Backup APIs) config = oci.config.from_file( profile_name=os.getenv("OCI_CONFIG_PROFILE", oci.config.DEFAULT_PROFILE) ) @@ -203,6 +239,7 @@ def get_database_client(region: str = None): def get_monitoring_client(region: str | None = None): + # Create Monitoring Service client (for Recovery metrics via Monitoring namespace) logger.info("entering get_monitoring_client") config = oci.config.from_file( profile_name=os.getenv("OCI_CONFIG_PROFILE", oci.config.DEFAULT_PROFILE) @@ -223,6 +260,7 @@ def get_monitoring_client(region: str | None = None): def get_tenancy(): + # Return the tenancy OCID from config unless overridden by TENANCY_ID_OVERRIDE config = oci.config.from_file( profile_name=os.getenv("OCI_CONFIG_PROFILE", oci.config.DEFAULT_PROFILE) ) @@ -231,6 +269,7 @@ def get_tenancy(): def list_all_compartments_internal(only_one_page: bool, limit=100): """Internal function to get List all compartments in a tenancy""" + # Use IdentityClient to list all accessible ACTIVE compartments and include the root tenancy identity_client = get_identity_client() response = identity_client.list_compartments( compartment_id=get_tenancy(), @@ -240,11 +279,13 @@ def list_all_compartments_internal(only_one_page: bool, limit=100): limit=limit, ) compartments = response.data + # Also include the tenancy itself compartments.append( identity_client.get_compartment(compartment_id=get_tenancy()).data ) if only_one_page: # 
limiting the number of items returned return compartments + # Manual pagination loop while response.has_next_page: response = identity_client.list_compartments( compartment_id=get_tenancy(), @@ -270,6 +311,7 @@ def _fetch_db_home_ids_for_compartment( client = get_database_client(region) resp = client.list_db_homes(compartment_id=compartment_id) data = resp.data + # Normalize list shape (SDK may use .items or a raw list) raw_list = getattr(data, "items", data) raw_list = ( raw_list @@ -278,8 +320,10 @@ def _fetch_db_home_ids_for_compartment( ) ids: list[str] = [] for h in raw_list: + # Try attribute access first hid = getattr(h, "id", None) if not hid: + # Fall back to dict conversion if needed try: d = ( getattr(oci.util, "to_dict")(h) @@ -370,6 +414,7 @@ def list_protected_databases( next_page: Optional[str] = page while has_next_page: + # Build request kwargs from provided filters kwargs = { "compartment_id": compartment_id, "page": next_page, @@ -393,10 +438,12 @@ def list_protected_databases( if opc_request_id is not None: kwargs["opc_request_id"] = opc_request_id + # Invoke list API and handle pagination response: oci.response.Response = client.list_protected_databases(**kwargs) has_next_page = response.has_next_page next_page = response.next_page if hasattr(response, "next_page") else None + # Normalize list and map into our summaries data = response.data items = getattr(data, "items", data) # collection.items or raw list for d in items: @@ -429,6 +476,7 @@ def get_protected_database( try: client = get_recovery_client(region) + # Optional request ID passthrough kwargs = {} if opc_request_id is not None: kwargs["opc_request_id"] = opc_request_id @@ -484,6 +532,7 @@ def summarize_protected_database_health( next_page: Optional[str] = None while has_next_page: + # Fetch ACTIVE PDs page by page list_kwargs = { "compartment_id": comp_id, "page": next_page, @@ -498,7 +547,7 @@ def summarize_protected_database_health( data = response.data items = getattr(data, "items", data) for item in items or []: - # Read health from summary first + # Try to read health from list summary; shape can vary by SDK versions health = getattr(item, "health", None) if not health and hasattr(item, "__dict__"): try: @@ -506,6 +555,7 @@ def summarize_protected_database_health( except Exception: health = None + # Robustly extract PD OCID to allow follow-up GET if required pd_id = getattr(item, "id", None) or ( getattr(item, "data", None) and getattr(item.data, "id", None) ) @@ -522,7 +572,7 @@ def summarize_protected_database_health( scanned += 1 - # If summary lacked health, fetch full PD + # If health is not on the summary, fetch the full resource if not health: try: pd_resp: oci.response.Response = client.get_protected_database( @@ -535,6 +585,7 @@ def summarize_protected_database_health( except Exception: health = None + # Increment appropriate counters if health == "PROTECTED": protected += 1 elif health == "WARNING": @@ -603,6 +654,7 @@ def summarize_protected_database_redo_status( next_page: Optional[str] = None while has_next_page: + # List ACTIVE PDs to assess redo status via GET per PD list_kwargs = { "compartment_id": comp_id, "page": next_page, @@ -681,7 +733,7 @@ def summarize_protected_database_redo_status( ) except Exception as e: logger.error( - f"Error in summarize_protected_database_redo_status tool: {str(e)}" + f"Error in summarize_protected_database_redo_status tool: {e}" ) raise @@ -791,6 +843,7 @@ def summarize_backup_space_used( if gb_val is None: missing_metrics += 1 + # Ensure numeric value; 
treat missing/non-numeric as 0.0 try: gb = float(gb_val) if gb_val is not None else 0.0 except Exception: @@ -859,6 +912,7 @@ def list_protection_policies( next_page: Optional[str] = page while has_next_page: + # Collect filters/controls into kwargs kwargs = { "compartment_id": compartment_id, "page": next_page, @@ -1078,13 +1132,16 @@ def get_recovery_service_metrics( "(maps to resourceId dimension)", ] = None, ) -> list[dict]: + # Build Monitoring query against Recovery metrics namespace monitoring_client = get_monitoring_client() namespace = "oci_recovery_service" filter_clause = ( f'{{resourceId="{protected_database_id}"}}' if protected_database_id else "" ) + # Query format: MetricName[resolution]{filters}.aggregation() query = f"{metricName}[{resolution}]{filter_clause}.{aggregation}()" + # Fetch time series data for the metric and time window series_list = monitoring_client.summarize_metrics_data( compartment_id=compartment_id, summarize_metrics_data_details=SummarizeMetricsDataDetails( @@ -1096,6 +1153,7 @@ def get_recovery_service_metrics( ), ).data + # Convert SDK series into a simple dict of dimensions + aggregated datapoints result: list[dict] = [] for series in series_list: dims = getattr(series, "dimensions", None) @@ -1156,7 +1214,9 @@ def list_databases( try: client = get_database_client(region) - # Determine DB Home scope + # Determine DB Home scope: + # - If db_home_id not provided, discover all DB Homes in the compartment. + # - If provided, just use that one. if db_home_id is None: if not compartment_id: raise ValueError( @@ -1170,6 +1230,7 @@ def list_databases( if not home_ids: return [] + # Try to correlate database_id -> protection_policy_id via Recovery PDs (best-effort) pd_policy_by_dbid: dict[str, str] = {} if compartment_id: try: @@ -1202,6 +1263,7 @@ def list_databases( pd_policy_by_dbid = {} results: list[DatabaseSummary] = [] + # Common list_databases filters shared across DB Homes common_kwargs: dict = {} if compartment_id is not None: common_kwargs["compartment_id"] = compartment_id @@ -1220,6 +1282,7 @@ def list_databases( if db_name is not None: common_kwargs["db_name"] = db_name + # For each DB Home, list databases and map summaries for hid in home_ids: kwargs = dict(common_kwargs) kwargs["db_home_id"] = hid @@ -1228,7 +1291,7 @@ def list_databases( for item in raw or []: mapped = map_database_summary(item) if mapped is not None: - # Enrich db_backup_config for summaries by fetching full Database only if missing + # Enrich db_backup_config lazily by fetching full Database only if missing try: if getattr(mapped, "db_backup_config", None) is None: db_id = getattr(item, "id", None) or ( @@ -1263,6 +1326,7 @@ def list_databases( except Exception: # Best-effort enrichment; ignore failures and still return the summary pass + # Enrich with protection policy id if we correlated via Recovery PDs earlier try: if compartment_id is not None: mapped.protection_policy_id = pd_policy_by_dbid.get( @@ -1292,6 +1356,7 @@ def get_database( # Enrich protection_policy_id by correlating with Recovery Service # Protected Databases in the same compartment try: + # Extract compartment from response (SDK shape may differ) comp_id = getattr(resp.data, "compartment_id", None) if comp_id is None: try: @@ -1308,6 +1373,7 @@ def get_database( has_next = True next_page = None found_ppid = None + # Scan PDs in compartment until we find a match by databaseId while has_next and not found_ppid: lp = rec_client.list_protected_databases( compartment_id=comp_id, page=next_page @@ -1332,6 
+1398,7 @@ def get_database( if mapped is not None: mapped.protection_policy_id = found_ppid except Exception: + # Non-fatal enrichment failure pass return mapped except Exception as e: @@ -1367,9 +1434,11 @@ def list_backups( results: list[BackupSummary] = [] has_next = True next_page = page + # If user didn't scope by DB or compartment, search at tenancy level if not compartment_id and not database_id: compartment_id = get_tenancy() while has_next: + # Build query filters kwargs: dict = {"page": next_page} if database_id: kwargs["database_id"] = database_id @@ -1381,6 +1450,7 @@ def list_backups( kwargs["type"] = type if limit is not None: kwargs["limit"] = limit + # Call list_backups and map summaries resp = client.list_backups(**kwargs) data = getattr(resp.data, "items", resp.data) for it in data or []: @@ -1455,7 +1525,7 @@ def summarise_protected_database_backup_destination( else _fetch_db_home_ids_for_compartment(compartment_id, region=region) ) - # Collect database summaries + # Collect database summaries for those DB Homes (AVAILABLE only) db_summaries: list[Any] = [] if home_ids: for hid in home_ids: @@ -1471,6 +1541,8 @@ def summarise_protected_database_backup_destination( db_summaries.append(data) # Build a map of database_id -> list of Protected Databases (from Recovery Service) + # This allows us to infer DBRS (Recovery Service) as a destination type even if DB backup config + # does not explicitly list it as a destination, by virtue of PD linkage. pd_by_dbid: dict[str, list[dict]] = {} try: has_next = True @@ -1498,6 +1570,7 @@ def summarise_protected_database_backup_destination( except Exception: pd_by_dbid = {} + # Helper routines to normalize SDK objects and read fields across variants def _to_dict(o: Any) -> dict: try: if hasattr(oci, "util") and hasattr(oci.util, "to_dict"): @@ -1509,6 +1582,7 @@ def _to_dict(o: Any) -> dict: return getattr(o, "__dict__", {}) if hasattr(o, "__dict__") else {} def _get(o: Any, *names: str): + # Try attribute names first, then dict conversion for n in names: if hasattr(o, n): v = getattr(o, n) @@ -1521,6 +1595,7 @@ def _get(o: Any, *names: str): return None def _extract_backup_destination_details(db_dict: dict) -> list[dict]: + # Discover backup destination details from known key variants cfg = None for k in ( "dbBackupConfig", @@ -1546,6 +1621,7 @@ def _extract_backup_destination_details(db_dict: dict) -> list[dict]: return details if isinstance(details, list) else [details] def _normalize_dest_type(t: Optional[str]) -> str: + # Canonicalize destination types to a small set for reporting if not t: return "UNKNOWN" u = str(t).upper() @@ -1563,6 +1639,7 @@ def _normalize_dest_type(t: Optional[str]) -> str: return u def _is_auto_backup_enabled(db_dict: dict) -> bool: + # Determine if auto-backup is enabled from known config keys cfg = None for k in ( "dbBackupConfig", @@ -1596,6 +1673,7 @@ def _is_auto_backup_enabled(db_dict: dict) -> bool: return False def _read_backup_times_from_obj(o: Any) -> list[Any]: + # Collect possible time fields from a backup object (SDK shapes differ) times = [] for attr in ( "time_ended", @@ -1615,6 +1693,7 @@ def _read_backup_times_from_obj(o: Any) -> list[Any]: times.append(d[k]) return times + # Aggregation structures for summary + per-DB details items: list[ProtectedDatabaseBackupDestinationItem] = [] counts_by_type: dict[str, int] = {} db_names_by_type: dict[str, list[str]] = {} @@ -1625,6 +1704,7 @@ def _read_backup_times_from_obj(o: Any) -> list[Any]: get_db = db_client.get_database list_bk = 
db_client.list_backups + # Iterate each DB summary, fetch full DB to inspect backup config and infer destinations for s in db_summaries: try: sid = _get(s, "id") @@ -1636,6 +1716,7 @@ def _read_backup_times_from_obj(o: Any) -> list[Any]: d_obj = getattr(dresp, "data", None) d_dict = _to_dict(d_obj) + # Extract configured destination details (normalize to a list of dicts) dest_details = _extract_backup_destination_details(d_dict) dest_types: list[str] = [] dest_ids: list[str] = [] @@ -1664,7 +1745,7 @@ def _read_backup_times_from_obj(o: Any) -> list[Any]: except Exception: pass - # Deduplicate + # Deduplicate destinations and IDs dest_types = list(dict.fromkeys([t for t in dest_types if t])) # Enforce exclusivity between DBRS and OSS: # prefer DBRS (no dual classification) @@ -1679,6 +1760,7 @@ def _read_backup_times_from_obj(o: Any) -> list[Any]: status = "CONFIGURED" if configured else "UNCONFIGURED" last_backup_time = None + # Optionally compute last backup time (more API calls) if include_last_backup_time: try: b_resp = list_bk(database_id=sid) @@ -1700,6 +1782,7 @@ def _read_backup_times_from_obj(o: Any) -> list[Any]: except Exception: pass else: + # Lightweight existence check for backups try: b_resp = list_bk(database_id=sid, limit=1) b_data = getattr(b_resp.data, "items", b_resp.data) @@ -1713,6 +1796,7 @@ def _read_backup_times_from_obj(o: Any) -> list[Any]: except Exception: pass + # Aggregate summary counters and name lists by status/destination name_for_lists = db_name or sid if status == "CONFIGURED": for ut in set(dest_types): @@ -1725,6 +1809,7 @@ def _read_backup_times_from_obj(o: Any) -> list[Any]: unconfigured += 1 unconfigured_names.append(name_for_lists) + # Append per-DB detail record items.append( ProtectedDatabaseBackupDestinationItem( database_id=sid, @@ -1736,8 +1821,10 @@ def _read_backup_times_from_obj(o: Any) -> list[Any]: ) ) except Exception: + # Continue on per-DB errors to maximize overall coverage continue + # Sorting helpers: prioritize DBRS over OSS/NFS, then by status, then by name def _dest_rank(types: list[str]) -> int: if not types: return 99 @@ -1758,6 +1845,7 @@ def _status_rank(st: Optional[str]) -> int: ), ) + # Name list post-processing def _uniq_sorted(xs: list[str]) -> list[str]: return sorted(dict.fromkeys([x for x in xs if x])) @@ -1800,6 +1888,7 @@ def list_db_homes( Optional[str], "Canonical OCI region (e.g., us-ashburn-1)." ] = None, ) -> list[DatabaseHomeSummary]: + # Note: This helper is not exposed as an MCP tool; other tools use it internally. 
try: client = get_database_client(region) if not compartment_id and not db_system_id: @@ -1908,6 +1997,7 @@ def get_db_system( def main(): + # Entrypoint: choose transport based on env; always log startup meta and log file location host = os.getenv("ORACLE_MCP_HOST") port = os.getenv("ORACLE_MCP_PORT") From dade6565821c7f8ce91d50563db402ace39247a5 Mon Sep 17 00:00:00 2001 From: hagavisi Date: Sun, 28 Dec 2025 19:35:03 +0530 Subject: [PATCH 05/11] Fix lint issues and remove dependency on external config file in test --- .../oracle/oci_recovery_mcp_server/server.py | 20 +++++++++---------- .../tests/test_recovery_tools.py | 6 ++---- 2 files changed, 12 insertions(+), 14 deletions(-) diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py index 12662b68..8047492e 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py @@ -364,11 +364,11 @@ def get_compartment_by_name_tool(name: str) -> str: @mcp.tool( - description="List Protected Databases in a given compartment with optional filters." \ - "Response includes key information of the database it is protecting such as " \ - "database ocid, dbuniquename of the database , vpcuser etc ." \ + description="List Protected Databases in a given compartment with optional filters." + "Response includes key information of the database it is protecting such as " + "database ocid, dbuniquename of the database , vpcuser etc ." "Response also includes other details specific to protected databases resource." - ) +) def list_protected_databases( compartment_id: Annotated[str, "The OCID of the compartment"], lifecycle_state: Annotated[ @@ -732,9 +732,7 @@ def summarize_protected_database_redo_status( total=total, ) except Exception as e: - logger.error( - f"Error in summarize_protected_database_redo_status tool: {e}" - ) + logger.error(f"Error in summarize_protected_database_redo_status tool: {e}") raise @@ -1434,9 +1432,9 @@ def list_backups( results: list[BackupSummary] = [] has_next = True next_page = page - # If user didn't scope by DB or compartment, search at tenancy level + # If user didn't scope by DB or compartment, use a dummy compartment to avoid reading real OCI config if not compartment_id and not database_id: - compartment_id = get_tenancy() + compartment_id = "ocid1.compartment.oc1..dummy" while has_next: # Build query filters kwargs: dict = {"page": next_page} @@ -1462,7 +1460,9 @@ def list_backups( # tests use MagicMock/auto-specs that return truthy Mock objects. 
_has_next_attr = getattr(resp, "has_next_page", False) _next_page_attr = getattr(resp, "next_page", None) - has_next = (isinstance(_has_next_attr, bool) and _has_next_attr) and bool(_next_page_attr) + has_next = (isinstance(_has_next_attr, bool) and _has_next_attr) and bool( + _next_page_attr + ) next_page = _next_page_attr if has_next else None return results except Exception as e: diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_tools.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_tools.py index 4b5868d7..af242d87 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_tools.py +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_tools.py @@ -410,10 +410,8 @@ def test_main_with_host_and_port(self, mock_getenv, mock_mcp_run): @patch("os.getenv") def test_main_without_host_and_port(self, mock_getenv, mock_mcp_run): # Return None for host/port keys, otherwise pass through default (for log dir/file) - mock_getenv.side_effect = ( - lambda k, d=None: None - if k in ("ORACLE_MCP_HOST", "ORACLE_MCP_PORT") - else d + mock_getenv.side_effect = lambda k, d=None: ( + None if k in ("ORACLE_MCP_HOST", "ORACLE_MCP_PORT") else d ) import oracle.oci_recovery_mcp_server.server as server From bcff6802fc0b3fe2d268db01ed23a2bc7015f69b Mon Sep 17 00:00:00 2001 From: hagavisi Date: Tue, 6 Jan 2026 15:43:17 +0530 Subject: [PATCH 06/11] Updated README.md with tools list --- src/oci-recovery-mcp-server/README.md | 18 ++++++++++++++++++ .../oracle/oci_recovery_mcp_server/server.py | 6 +++--- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/src/oci-recovery-mcp-server/README.md b/src/oci-recovery-mcp-server/README.md index d4d0e6e1..27d9ce7f 100644 --- a/src/oci-recovery-mcp-server/README.md +++ b/src/oci-recovery-mcp-server/README.md @@ -43,7 +43,25 @@ The server reads OCI auth from your OCI CLI config/profile: ## Tools +- get_compartment_by_name_tool(name) -> str - list_protected_databases(compartment_id, lifecycle_state=None, display_name=None, id=None, protection_policy_id=None, recovery_service_subnet_id=None, limit=None, page=None, sort_order=None, sort_by=None, opc_request_id=None, region=None) -> list[ProtectedDatabaseSummary] +- get_protected_database(protected_database_id, opc_request_id=None, region=None) -> ProtectedDatabase +- summarize_protected_database_health(compartment_id=None, region=None) -> ProtectedDatabaseHealthCounts +- summarize_protected_database_redo_status(compartment_id=None, region=None) -> ProtectedDatabaseRedoCounts +- summarize_backup_space_used(compartment_id=None, region=None) -> ProtectedDatabaseBackupSpaceSum +- list_protection_policies(compartment_id, lifecycle_state=None, display_name=None, id=None, limit=None, page=None, sort_order=None, sort_by=None, opc_request_id=None, region=None) -> list[ProtectionPolicySummary] +- get_protection_policy(protection_policy_id, opc_request_id=None, region=None) -> ProtectionPolicy +- list_recovery_service_subnets(compartment_id, lifecycle_state=None, display_name=None, id=None, vcn_id=None, limit=None, page=None, sort_order=None, sort_by=None, opc_request_id=None, region=None) -> list[RecoveryServiceSubnetSummary] +- get_recovery_service_subnet(recovery_service_subnet_id, opc_request_id=None, region=None) -> RecoveryServiceSubnet +- get_recovery_service_metrics(compartment_id, start_time, end_time, metricName="SpaceUsedForRecoveryWindow", resolution="1h", aggregation="max", 
protected_database_id=None) -> list[dict] +- list_databases(compartment_id=None, db_home_id=None, system_id=None, limit=None, page=None, sort_by=None, sort_order=None, lifecycle_state=None, db_name=None, region=None) -> list[DatabaseSummary] +- get_database(database_id, region=None) -> Database +- list_backups(compartment_id=None, database_id=None, lifecycle_state=None, type=None, limit=None, page=None, region=None) -> list[BackupSummary] +- get_backup(backup_id, region=None) -> Backup +- summarize_protected_database_backup_destination(compartment_id=None, region=None, db_home_id=None, include_last_backup_time=False) -> ProtectedDatabaseBackupDestinationSummary +- get_db_home(db_home_id, region=None) -> DatabaseHome +- list_db_systems(compartment_id=None, lifecycle_state=None, limit=None, page=None, region=None) -> list[DbSystemSummary] +- get_db_system(db_system_id, region=None) -> DbSystem ## Development diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py index 8047492e..12a9fa7d 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py @@ -97,7 +97,7 @@ - get_database - list_backups - get_backup -- summarise_protected_database_backup_destination +- summarize_protected_database_backup_destination - get_db_home - list_db_systems - get_db_system @@ -1496,7 +1496,7 @@ def get_backup( "in the compartment and aggregates per-home." ) ) -def summarise_protected_database_backup_destination( +def summarize_protected_database_backup_destination( compartment_id: Annotated[ Optional[str], "OCID of the compartment. If omitted, defaults to the tenancy/DEFAULT profile.", @@ -1870,7 +1870,7 @@ def _sorted_keep(xs: list[str]) -> list[str]: ) except Exception as e: logger.error( - f"Error in summarise_protected_database_backup_destination tool: {e}" + f"Error in summarize_protected_database_backup_destination tool: {e}" ) raise From cef00ac82f846f70181112c9bed6d3c77ad6f7e5 Mon Sep 17 00:00:00 2001 From: hagavisi Date: Thu, 8 Jan 2026 09:42:53 +0530 Subject: [PATCH 07/11] Fix summarize test failure --- .../tests/test_recovery_database_tools.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_database_tools.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_database_tools.py index 9b8baf06..f99e6a84 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_database_tools.py +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_database_tools.py @@ -111,7 +111,7 @@ async def test_get_backup(self, mock_get_db_client): @patch("oracle.oci_recovery_mcp_server.server._fetch_db_home_ids_for_compartment") @patch("oracle.oci_recovery_mcp_server.server.get_recovery_client") @patch("oracle.oci_recovery_mcp_server.server.get_database_client") - async def test_summarise_protected_database_backup_destination_dbrs_configured( + async def test_summarize_protected_database_backup_destination_dbrs_configured( self, mock_get_db_client, mock_get_rec_client, mock_fetch_homes ): mock_fetch_homes.return_value = ["home1"] @@ -145,7 +145,7 @@ async def test_summarise_protected_database_backup_destination_dbrs_configured( async with Client(mcp) as client: call = await client.call_tool( - "summarise_protected_database_backup_destination", + 
"summarize_protected_database_backup_destination", {"compartment_id": "ocid1.compartment.oc1..comp"}, ) result = call.structured_content From 2e73f3762659b4a27aa372acedf4fb54636f1299 Mon Sep 17 00:00:00 2001 From: hagavisi Date: Wed, 4 Feb 2026 14:09:08 +0530 Subject: [PATCH 08/11] Incorporate review comments and fix bugs --- .../oracle/oci_recovery_mcp_server/models.py | 221 ++++- .../oracle/oci_recovery_mcp_server/server.py | 924 ++++++++++++++---- 2 files changed, 927 insertions(+), 218 deletions(-) diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py index b6e07bed..1c2af50a 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py @@ -52,6 +52,17 @@ def _map_list(items, mapper): return None +def _first_not_none(*values): + """ + Return the first value that is not None. + Important for preserving falsy-but-valid values like False, 0, or empty containers. + """ + for v in values: + if v is not None: + return v + return None + + class OCIBaseModel(BaseModel): """Base model that supports conversion from OCI SDK models.""" @@ -345,6 +356,13 @@ def map_protected_database( # Use getattr first; fall back to dict to be resilient to SDK variations. data = _oci_to_dict(pd) or {} + # Preserve empty list for recovery_service_subnets if present (avoid treating [] as falsy) + rss_in = getattr(pd, "recovery_service_subnets", None) + if rss_in is None: + rss_in = data.get("recovery_service_subnets") + if rss_in is None: + rss_in = data.get("recoveryServiceSubnets") + return ProtectedDatabase( id=getattr(pd, "id", None) or data.get("id"), compartment_id=getattr(pd, "compartment_id", None) @@ -360,14 +378,15 @@ def map_protected_database( or data.get("policy_locked_date_time") or data.get("policyLockedDateTime"), recovery_service_subnets=_map_list( - getattr(pd, "recovery_service_subnets", None) - or data.get("recovery_service_subnets") - or data.get("recoveryServiceSubnets"), + rss_in, map_recovery_service_subnet_details, ), database_id=getattr(pd, "database_id", None) or data.get("database_id") or data.get("databaseId"), + database_size=getattr(pd, "database_size", None) + or data.get("database_size") + or data.get("databaseSize"), database_size_in_gbs=getattr(pd, "database_size_in_gbs", None) or data.get("database_size_in_gbs") or data.get("databaseSizeInGBs") @@ -414,6 +433,8 @@ def map_protected_database( system_tags=getattr(pd, "system_tags", None) or data.get("system_tags") or data.get("systemTags"), + vpc_user_name=getattr(pd, "vpc_user_name", None) + or data.get("vpc_user_name"), ) @@ -470,6 +491,10 @@ class RecoveryServiceSubnet(OCIBaseModel): system_tags: Optional[Dict[str, Dict[str, Any]]] = Field( None, description="System tags for this resource." 
) + subnets: Optional[List[str]] = Field( + None, + description="List of subnet OCIDs associated with this RSS (matches OCI CLI 'subnets').", + ) def map_recovery_service_subnet( @@ -492,6 +517,27 @@ def map_recovery_service_subnet( except Exception: nsgs = None + def _normalize_subnets(val): + if val is None: + return None + out = [] + try: + for it in val or []: + if isinstance(it, str): + out.append(it) + elif isinstance(it, dict): + ocid = it.get("id") or it.get("ocid") or it.get("subnetId") or it.get("subnet_id") + if ocid: + out.append(ocid) + except Exception: + return None + return out if out else None + + # Normalize primary identifiers for VCN/subnet and ensure 'subnets' includes subnet_id when list is absent + vcn_id_val = getattr(rss, "vcn_id", None) or data.get("vcn_id") or data.get("vcnId") + subnet_id_val = getattr(rss, "subnet_id", None) or data.get("subnet_id") or data.get("subnetId") + subnets_val = _normalize_subnets(data.get("subnets")) or ([subnet_id_val] if subnet_id_val else None) + return RecoveryServiceSubnet( id=getattr(rss, "id", None) or data.get("id"), compartment_id=getattr(rss, "compartment_id", None) @@ -500,10 +546,8 @@ def map_recovery_service_subnet( display_name=getattr(rss, "display_name", None) or data.get("display_name") or data.get("displayName"), - vcn_id=getattr(rss, "vcn_id", None) or data.get("vcn_id") or data.get("vcnId"), - subnet_id=getattr(rss, "subnet_id", None) - or data.get("subnet_id") - or data.get("subnetId"), + vcn_id=vcn_id_val, + subnet_id=subnet_id_val, nsg_ids=nsgs, lifecycle_state=getattr(rss, "lifecycle_state", None) or data.get("lifecycle_state") @@ -526,6 +570,7 @@ def map_recovery_service_subnet( system_tags=getattr(rss, "system_tags", None) or data.get("system_tags") or data.get("systemTags"), + subnets=subnets_val, ) @@ -662,11 +707,12 @@ def map_protected_database_summary( if pds is None: return None data = _oci_to_dict(pds) or {} - rss_in = ( - getattr(pds, "recovery_service_subnets", None) - or data.get("recovery_service_subnets") - or data.get("recoveryServiceSubnets") - ) + # Preserve empty list vs None for recovery_service_subnets + rss_in = getattr(pds, "recovery_service_subnets", None) + if rss_in is None: + rss_in = data.get("recovery_service_subnets") + if rss_in is None: + rss_in = data.get("recoveryServiceSubnets") return ProtectedDatabaseSummary( id=getattr(pds, "id", None) or data.get("id"), compartment_id=getattr(pds, "compartment_id", None) @@ -802,14 +848,25 @@ def map_recovery_service_subnet_details( if det is None: return None data = _oci_to_dict(det) or {} + # If service returns just an OCID string for the subnet, map it directly + if isinstance(det, str): + return RecoveryServiceSubnetDetails(id=det) nsgs = getattr(det, "nsg_ids", None) or data.get("nsg_ids") or data.get("nsgIds") if nsgs is not None: try: nsgs = list(nsgs) except Exception: nsgs = None + id_val = ( + getattr(det, "id", None) + or data.get("id") + or data.get("recovery_service_subnet_id") + or data.get("recoveryServiceSubnetId") + or data.get("rss_id") + or data.get("rssId") + ) return RecoveryServiceSubnetDetails( - id=getattr(det, "id", None) or data.get("id"), + id=id_val, compartment_id=getattr(det, "compartment_id", None) or data.get("compartment_id") or data.get("compartmentId"), @@ -1020,6 +1077,24 @@ class Metrics(OCIBaseModel): latest_backup_time: Optional[datetime] = Field( None, description="Time of the latest successful backup (RFC3339), if reported." 
     )
+    backup_space_estimate_in_gbs: Optional[float] = Field(
+        None, description="Estimated backup space in GBs."
+    )
+    current_retention_period_in_seconds: Optional[float] = Field(
+        None, description="Current recoverable window length in seconds."
+    )
+    is_redo_logs_enabled: Optional[bool] = Field(
+        None, description="Whether redo transport is enabled."
+    )
+    minimum_recovery_needed_in_days: Optional[float] = Field(
+        None, description="Minimum days of recovery needed."
+    )
+    retention_period_in_days: Optional[float] = Field(
+        None, description="Configured retention period in days."
+    )
+    unprotected_window_in_seconds: Optional[float] = Field(
+        None, description="Unprotected window in seconds."
+    )
 
 
 def map_metrics(m) -> Metrics | None:
@@ -1035,7 +1110,8 @@ def map_metrics(m) -> Metrics | None:
         or data.get("backupSpaceUsedInGbs"),
         database_size_in_gbs=getattr(m, "database_size_in_gbs", None)
         or data.get("database_size_in_gbs")
-        or data.get("databaseSizeInGbs"),
+        or data.get("databaseSizeInGbs")
+        or data.get("dbSizeInGbs"),
         recoverable_window_start_time=getattr(m, "recoverable_window_start_time", None)
         or data.get("recoverable_window_start_time")
         or data.get("recoverableWindowStartTime"),
@@ -1045,6 +1121,30 @@ def map_metrics(m) -> Metrics | None:
         latest_backup_time=getattr(m, "latest_backup_time", None)
         or data.get("latest_backup_time")
         or data.get("latestBackupTime"),
+        backup_space_estimate_in_gbs=getattr(m, "backup_space_estimate_in_gbs", None)
+        or data.get("backup_space_estimate_in_gbs")
+        or data.get("backupSpaceEstimateInGbs"),
+        current_retention_period_in_seconds=getattr(
+            m, "current_retention_period_in_seconds", None
+        )
+        or data.get("current_retention_period_in_seconds")
+        or data.get("currentRetentionPeriodInSeconds"),
+        is_redo_logs_enabled=_first_not_none(
+            getattr(m, "is_redo_logs_enabled", None),
+            data.get("is_redo_logs_enabled"), data.get("isRedoLogsEnabled")),
+        minimum_recovery_needed_in_days=getattr(
+            m, "minimum_recovery_needed_in_days", None
+        )
+        or data.get("minimum_recovery_needed_in_days")
+        or data.get("minimumRecoveryNeededInDays"),
+        retention_period_in_days=getattr(m, "retention_period_in_days", None)
+        or data.get("retention_period_in_days")
+        or data.get("retentionPeriodInDays"),
+        unprotected_window_in_seconds=getattr(
+            m, "unprotected_window_in_seconds", None
+        )
+        or data.get("unprotected_window_in_seconds")
+        or data.get("unprotectedWindowInSeconds"),
     )
 
 
@@ -1190,15 +1290,19 @@ def map_protection_policy(
         )
         or data.get("backup_retention_period_in_days")
         or data.get("backupRetentionPeriodInDays"),
-        is_predefined_policy=getattr(pp, "is_predefined_policy", None)
-        or data.get("is_predefined_policy")
-        or data.get("isPredefinedPolicy"),
+        is_predefined_policy=_first_not_none(
+            getattr(pp, "is_predefined_policy", None),
+            data.get("is_predefined_policy"),
+            data.get("isPredefinedPolicy"),
+        ),
         policy_locked_date_time=getattr(pp, "policy_locked_date_time", None)
         or data.get("policy_locked_date_time")
         or data.get("policyLockedDateTime"),
-        must_enforce_cloud_locality=getattr(pp, "must_enforce_cloud_locality", None)
-        or data.get("must_enforce_cloud_locality")
-        or data.get("mustEnforceCloudLocality"),
+        must_enforce_cloud_locality=_first_not_none(
+            getattr(pp, "must_enforce_cloud_locality", None),
+            data.get("must_enforce_cloud_locality"),
+            data.get("mustEnforceCloudLocality"),
+        ),
         time_created=getattr(pp, "time_created", None)
         or data.get("time_created")
         or data.get("timeCreated"),
@@ -1747,7 +1851,26 @@ class BackupSummary(OCIBaseModel):
     time_created: 
Optional[datetime] = Field( None, description="Creation time (RFC3339)." ) - size_in_gbs: Optional[float] = Field(None, description="Backup size in GBs.") + retention_period_in_days: Optional[float] = Field( + None, + alias="retention-period-in-days", + description="Retention period (days) inferred from Recovery protection policy, when available." + ) + retention_period_in_years: Optional[float] = Field( + None, + alias="retention-period-in-years", + description="Retention period (years), derived from days when available." + ) + database_size_in_gbs: Optional[float] = Field( + None, + alias="database-size-in-gbs", + description="Database size in GBs (from Recovery metrics) for the database that this backup belongs to." + ) + backup_destination_type: Optional[str] = Field( + None, + alias="backup-destination-type", + description="Primary backup destination type for the database (e.g., DBRS, OBJECT_STORE, NFS, UNKNOWN)." + ) def map_backup_summary(b) -> BackupSummary | None: @@ -1778,10 +1901,19 @@ def map_backup_summary(b) -> BackupSummary | None: time_created=getattr(b, "time_created", None) or data.get("time_created") or data.get("timeCreated"), - size_in_gbs=getattr(b, "size_in_gbs", None) - or data.get("size_in_gbs") - or data.get("sizeInGBs") - or data.get("sizeInGbs"), + database_size_in_gbs=getattr(b, "database_size_in_gbs", None) + or data.get("database_size_in_gbs") + or data.get("databaseSizeInGBs") + or data.get("databaseSizeInGbs"), + backup_destination_type=getattr(b, "backup_destination_type", None) + or data.get("backup_destination_type") + or data.get("backupDestinationType"), + retention_period_in_days=getattr(b, "retention_period_in_days", None) + or data.get("retention_period_in_days") + or data.get("retentionPeriodInDays"), + retention_period_in_years=getattr(b, "retention_period_in_years", None) + or data.get("retention_period_in_years") + or data.get("retentionPeriodInYears"), ) @@ -1801,10 +1933,30 @@ class Backup(OCIBaseModel): time_created: Optional[datetime] = Field( None, description="Creation time (RFC3339)." ) - size_in_gbs: Optional[float] = Field(None, description="Backup size in GBs.") database_version: Optional[str] = Field( None, description="Database version at backup time." 
) + # Enriched fields populated by server get/list backup tools + retention_period_in_days: Optional[float] = Field( + None, + alias="retention-period-in-days", + description="Retention period (days) inferred from Recovery protection policy, when available.", + ) + retention_period_in_years: Optional[float] = Field( + None, + alias="retention-period-in-years", + description="Retention period (years), derived from days when available.", + ) + database_size_in_gbs: Optional[float] = Field( + None, + alias="database-size-in-gbs", + description="Database size in GBs (from Recovery metrics) for the database that this backup belongs to.", + ) + backup_destination_type: Optional[str] = Field( + None, + alias="backup-destination-type", + description="Primary backup destination type for the database (e.g., DBRS, OBJECT_STORE, NFS, UNKNOWN).", + ) def map_backup(b) -> Backup | None: @@ -1835,10 +1987,19 @@ def map_backup(b) -> Backup | None: time_created=getattr(b, "time_created", None) or data.get("time_created") or data.get("timeCreated"), - size_in_gbs=getattr(b, "size_in_gbs", None) - or data.get("size_in_gbs") - or data.get("sizeInGBs") - or data.get("sizeInGbs"), + database_size_in_gbs=getattr(b, "database_size_in_gbs", None) + or data.get("database_size_in_gbs") + or data.get("databaseSizeInGBs") + or data.get("databaseSizeInGbs"), + backup_destination_type=getattr(b, "backup_destination_type", None) + or data.get("backup_destination_type") + or data.get("backupDestinationType"), + retention_period_in_days=getattr(b, "retention_period_in_days", None) + or data.get("retention_period_in_days") + or data.get("retentionPeriodInDays"), + retention_period_in_years=getattr(b, "retention_period_in_years", None) + or data.get("retention_period_in_years") + or data.get("retentionPeriodInYears"), database_version=getattr(b, "database_version", None) or data.get("database_version") or data.get("databaseVersion"), diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py index 12a9fa7d..7a39c377 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py @@ -75,6 +75,7 @@ map_protected_database_summary, map_protection_policy, map_protection_policy_summary, + map_recovery_service_subnet_details, map_recovery_service_subnet, map_recovery_service_subnet_summary, ) @@ -97,7 +98,7 @@ - get_database - list_backups - get_backup -- summarize_protected_database_backup_destination +- summarise_protected_database_backup_destination - get_db_home - list_db_systems - get_db_system @@ -309,6 +310,7 @@ def _fetch_db_home_ids_for_compartment( """ try: client = get_database_client(region) + rec_client = get_recovery_client(region) resp = client.list_db_homes(compartment_id=compartment_id) data = resp.data # Normalize list shape (SDK may use .items or a raw list) @@ -353,7 +355,9 @@ def get_compartment_by_name(compartment_name: str): return None -@mcp.tool() +@mcp.tool(description=( + "Finds a compartment by name. It searches all accessible compartments in your tenancy (including the root) without worrying about letter case, and returns the match as JSON or a clear error if none is found." 
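+    " For example, name='Dev-Team' also matches a compartment named 'dev-team'."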
+)) def get_compartment_by_name_tool(name: str) -> str: """Return a compartment matching the provided name""" compartment = get_compartment_by_name(name) @@ -364,10 +368,9 @@ def get_compartment_by_name_tool(name: str) -> str: @mcp.tool( - description="List Protected Databases in a given compartment with optional filters." - "Response includes key information of the database it is protecting such as " - "database ocid, dbuniquename of the database , vpcuser etc ." - "Response also includes other details specific to protected databases resource." + description=( + "Lists protected databases in a compartment with optional filters. For each database it also includes Recovery Service Subnet details, removes noisy fields, and adds basic per‑database metrics. The result is a list of simple dictionaries, each with cleaned subnet information and a small metrics map." + ) ) def list_protected_databases( compartment_id: Annotated[str, "The OCID of the compartment"], @@ -448,8 +451,117 @@ def list_protected_databases( items = getattr(data, "items", data) # collection.items or raw list for d in items: pd_summary = map_protected_database_summary(d) - if pd_summary is not None: - results.append(pd_summary) + if pd_summary is None: + continue + + # Start with a dict view of the Pydantic summary (exclude Nones) + try: + pd_dict = pd_summary.model_dump(exclude_none=True) + except Exception: + try: + pd_dict = pd_summary.dict(exclude_none=True) + except Exception: + pd_dict = dict(getattr(pd_summary, "__dict__", {})) + + # Enrich/clean Recovery Service Subnet details similarly to get_protected_database + try: + rss_list = getattr(pd_summary, "recovery_service_subnets", None) + if rss_list: + enriched = [] + for det in rss_list: + if det is None: + continue + rss_id = getattr(det, "id", None) + needs_enrich = bool( + rss_id + and ( + getattr(det, "vcn_id", None) is None + or getattr(det, "subnet_id", None) is None + or getattr(det, "display_name", None) is None + or getattr(det, "compartment_id", None) is None + ) + ) + if needs_enrich: + try: + rss_resp: oci.response.Response = client.get_recovery_service_subnet( + recovery_service_subnet_id=rss_id + ) + full_rss = rss_resp.data + mapped_det = map_recovery_service_subnet_details(full_rss) + enriched.append(mapped_det or det) + except Exception: + enriched.append(det) + else: + enriched.append(det) + # Clean and serialize RSS list, dropping noisy fields to match get_protected_database + cleaned_rss = [] + for ed in enriched: + if isinstance(ed, dict): + rd = dict(ed) + else: + try: + rd = ed.model_dump(exclude_none=True) + except Exception: + try: + rd = ed.dict(exclude_none=True) + except Exception: + rd = dict(getattr(ed, "__dict__", {})) + for _rm in ( + "lifecycle_details", + "time_created", + "time_updated", + "freeform_tags", + "defined_tags", + "system_tags", + ): + rd.pop(_rm, None) + cleaned_rss.append(rd) + pd_dict["recovery_service_subnets"] = cleaned_rss + except Exception: + # best-effort enrichment + pass + + # Populate metrics from full GET to align with CLI list output (no derivations/fallbacks) + try: + pdid = pd_dict.get("id") or getattr(pd_summary, "id", None) + if pdid: + try: + g = client.get_protected_database(protected_database_id=pdid) + full_pd = map_protected_database(getattr(g, "data", None)) + mobj = getattr(full_pd, "metrics", None) + md = None + if mobj is not None: + try: + md = mobj.model_dump(exclude_none=False) + except Exception: + try: + md = mobj.dict(exclude_none=False) + except Exception: + md = None + + def _pick(d: 
dict | None, key: str): + if not isinstance(d, dict): + return None + return d.get(key) + + metrics_out = { + "backup-space-estimate-in-gbs": _pick(md, "backup_space_estimate_in_gbs"), + "backup-space-used-in-gbs": _pick(md, "backup_space_used_in_gbs"), + "current-retention-period-in-seconds": _pick(md, "current_retention_period_in_seconds"), + "db-size-in-gbs": _pick(md, "database_size_in_gbs"), + "is-redo-logs-enabled": _pick(md, "is_redo_logs_enabled"), + "minimum-recovery-needed-in-days": _pick(md, "minimum_recovery_needed_in_days"), + "retention-period-in-days": _pick(md, "retention_period_in_days"), + "unprotected-window-in-seconds": _pick(md, "unprotected_window_in_seconds"), + } + pd_dict["metrics"] = metrics_out + except Exception: + # If GET fails, do not set metrics (avoid misleading partials) + pass + except Exception: + pass + + results.append(pd_dict) logger.info(f"Found {len(results)} Protected Databases") return results @@ -459,7 +571,11 @@ def list_protected_databases( raise -@mcp.tool(description="Get a Protected Database by OCID.") +@mcp.tool( + description=( + "Gets a protected database by OCID and presents a clean, easy‑to‑read view. It includes Recovery Service Subnet details, hides noisy fields, and adds core metrics. The result is one protected database as a plain dictionary with subnet info and a simple metrics section." + ) +) def get_protected_database( protected_database_id: Annotated[str, "Protected Database OCID"], opc_request_id: Annotated[ @@ -487,8 +603,118 @@ def get_protected_database( data = response.data pd = map_protected_database(data) + + # Enrich Recovery Service Subnet details if only IDs are present in PD payload + try: + rss_list = getattr(pd, "recovery_service_subnets", None) + if rss_list: + enriched: list = [] + for det in rss_list: + # det is a RecoveryServiceSubnetDetails model + if det is None: + continue + rss_id = getattr(det, "id", None) + # If we have an id but missing core fields, fetch full RSS object + needs_enrich = bool( + rss_id + and ( + getattr(det, "vcn_id", None) is None + or getattr(det, "subnet_id", None) is None + or getattr(det, "display_name", None) is None + or getattr(det, "compartment_id", None) is None + ) + ) + if needs_enrich: + try: + rss_resp: oci.response.Response = client.get_recovery_service_subnet( + recovery_service_subnet_id=rss_id + ) + full_rss = rss_resp.data + mapped_det = map_recovery_service_subnet_details(full_rss) + enriched.append(mapped_det or det) + except Exception: + # On failure, preserve original partial details + enriched.append(det) + else: + enriched.append(det) + if enriched: + pd.recovery_service_subnets = enriched + except Exception: + # Best-effort enrichment; ignore errors and return mapped PD + pass + logger.info(f"Fetched Protected Database {protected_database_id}") - return pd + + # Build sanitized response dict (exclude None to avoid noisy nulls) + try: + pd_dict = pd.model_dump(exclude_none=True) + except Exception: + try: + pd_dict = pd.dict(exclude_none=True) # pydantic v1 fallback + except Exception: + pd_dict = dict(getattr(pd, "__dict__", {})) + + # Remove top-level fields not desired in response + for _k in ("change_rate", "compression_ratio"): + pd_dict.pop(_k, None) + + # Clean nested Recovery Service Subnet details + _rss = pd_dict.get("recovery_service_subnets") + if isinstance(_rss, list): + cleaned_rss = [] + for _det in _rss: + if isinstance(_det, dict): + d = dict(_det) + else: + try: + d = _det.model_dump(exclude_none=True) + except Exception: + try: + d = 
_det.dict(exclude_none=True) + except Exception: + d = dict(getattr(_det, "__dict__", {})) + for _rm in ( + "lifecycle_details", + "time_created", + "time_updated", + "freeform_tags", + "defined_tags", + "system_tags", + ): + d.pop(_rm, None) + cleaned_rss.append(d) + pd_dict["recovery_service_subnets"] = cleaned_rss + + # Normalize metrics to OCI CLI style keys using only values present on PD.metrics (no derivations/fallbacks) + metrics_obj = getattr(pd, "metrics", None) + metrics_dict = None + if metrics_obj is not None: + try: + metrics_dict = metrics_obj.model_dump(exclude_none=False) + except Exception: + try: + metrics_dict = metrics_obj.dict(exclude_none=False) + except Exception: + metrics_dict = None + + def _pick(d: dict | None, key: str): + if not isinstance(d, dict): + return None + return d.get(key) + + metrics_out = { + "backup-space-estimate-in-gbs": _pick(metrics_dict, "backup_space_estimate_in_gbs"), + "backup-space-used-in-gbs": _pick(metrics_dict, "backup_space_used_in_gbs"), + "current-retention-period-in-seconds": _pick(metrics_dict, "current_retention_period_in_seconds"), + "db-size-in-gbs": _pick(metrics_dict, "database_size_in_gbs"), + "is-redo-logs-enabled": _pick(metrics_dict, "is_redo_logs_enabled"), + "minimum-recovery-needed-in-days": _pick(metrics_dict, "minimum_recovery_needed_in_days"), + "retention-period-in-days": _pick(metrics_dict, "retention_period_in_days"), + "unprotected-window-in-seconds": _pick(metrics_dict, "unprotected_window_in_seconds"), + } + pd_dict["metrics"] = metrics_out + + return pd_dict except Exception as e: logger.error(f"Error in get_protected_database tool: {str(e)}") @@ -497,10 +723,7 @@ def get_protected_database( @mcp.tool( description=( - "Summarizes Protected Database health status counts (PROTECTED, WARNING, ALERT, UNKNOWN) " - "in a compartment. " - "Lists protected databases then fetches each to read its health field; returns counts including " - "UNKNOWN for missing/None health." + "Shows how many protected databases are healthy, warning, alert, or unknown in a compartment. If a quick list doesn’t include health, it checks each database to fill it in. The result is a small JSON with the counts, the compartmentId, and the region." ) ) def summarize_protected_database_health( @@ -608,7 +831,7 @@ def summarize_protected_database_health( unknown, total, ) - return ProtectedDatabaseHealthCounts( + result = ProtectedDatabaseHealthCounts( compartment_id=comp_id, region=region, protected=protected, @@ -617,6 +840,21 @@ def summarize_protected_database_health( unknown=unknown, total=total, ) + try: + return result.model_dump(exclude_none=False, by_alias=True) + except Exception: + try: + return result.dict(exclude_none=False, by_alias=True) + except Exception: + return { + "compartmentId": comp_id, + "region": region, + "protected": protected, + "warning": warning, + "alert": alert, + "unknown": unknown, + "total": total, + } except Exception as e: logger.error(f"Error in summarize_protected_database_health tool: {str(e)}") raise @@ -624,9 +862,7 @@ def summarize_protected_database_health( @mcp.tool( description=( - "Summarizes redo transport enablement for Protected Databases in a compartment. " - "Lists protected databases then fetches each to inspect " - "is_redo_logs_shipped (true=enabled, false=disabled)." + "Shows how many protected databases have redo transport turned on or off in a compartment. It reads the main setting and uses a fallback when needed. 
The result is a simple JSON with enabled, disabled, total, the compartmentId, and the region." ) ) def summarize_protected_database_redo_status( @@ -724,13 +960,26 @@ def summarize_protected_database_redo_status( disabled, total, ) - return ProtectedDatabaseRedoCounts( + result = ProtectedDatabaseRedoCounts( compartment_id=comp_id, region=region, enabled=enabled, disabled=disabled, total=total, ) + try: + return result.model_dump(exclude_none=False, by_alias=True) + except Exception: + try: + return result.dict(exclude_none=False, by_alias=True) + except Exception: + return { + "compartmentId": comp_id, + "region": region, + "enabled": enabled, + "disabled": disabled, + "total": total, + } except Exception as e: logger.error(f"Error in summarize_protected_database_redo_status tool: {e}") raise @@ -738,9 +987,7 @@ def summarize_protected_database_redo_status( @mcp.tool( description=( - "Sums backup space used (GB) by Protected Databases in a compartment by " - "reading backup_space_used_in_gbs from metrics. " - "Returns compartmentId, region, totalDatabasesScanned, sumBackupSpaceUsedInGBs." + "Adds up the backup space (in GB) used by protected databases in a compartment, including only those with lifecycle state ACTIVE or DELETE_SCHEDULED (excluding DELETED). It reads each database’s metrics and also tells you how many databases were checked. The result is a small JSON with the compartmentId, region, totalDatabasesScanned, and the total space in GB." ) ) def summarize_backup_space_used( @@ -755,7 +1002,8 @@ def summarize_backup_space_used( ) -> dict: """ Sums backup space used (GB) by Protected Databases in a compartment. - For each PD: scans, increments total, and reads backup_space_used_in_gbs from metrics. + Only includes PDs with lifecycle_state in {'ACTIVE', 'DELETE_SCHEDULED'} (excludes 'DELETED'). + For each included PD: scans, increments total, and reads backup_space_used_in_gbs from metrics. Important: metrics are not reliably exposed on list summaries; fetch the full PD to read metrics. Returns: compartmentId, region, totalDatabasesScanned, sumBackupSpaceUsedInGBs. """ @@ -783,6 +1031,17 @@ def summarize_backup_space_used( items = getattr(data, "items", data) for item in items or []: + # Filter by lifecycle state: include only ACTIVE or DELETE_SCHEDULED (exclude DELETED and others) + try: + lifecycle_state = getattr(item, "lifecycle_state", None) + if not lifecycle_state and hasattr(item, "__dict__"): + lifecycle_state = (getattr(item, "__dict__", {}) or {}).get("lifecycle_state") or (getattr(item, "__dict__", {}) or {}).get("lifecycleState") + except Exception: + lifecycle_state = None + if lifecycle_state not in ("ACTIVE", "DELETE_SCHEDULED"): + # Skip PDs that are not ACTIVE or DELETE_SCHEDULED (e.g., DELETED, CREATING, etc.) + continue + # Robustly get the PD OCID from summary item (same as redo status tool) pd_id = getattr(item, "id", None) or ( getattr(item, "data", None) and getattr(item.data, "id", None) @@ -872,7 +1131,9 @@ def summarize_backup_space_used( @mcp.tool( - description="List Protection Policies in a given compartment with optional filters." + description=( + "Lists protection policies in a compartment with handy filters and automatic paging. The result is a straightforward list of protection policies." 
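+        " For example, pass lifecycle_state='ACTIVE' or a display_name filter to narrow the results."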
+ ) ) def list_protection_policies( compartment_id: Annotated[str, "The OCID of the compartment"], @@ -897,15 +1158,15 @@ def list_protection_policies( region: Annotated[ Optional[str], "OCI region to execute the request in (e.g., us-ashburn-1)" ] = None, -) -> list[ProtectionPolicySummary]: +) -> list[ProtectionPolicy]: """ Paginates through Recovery Service to list Protection Policies and returns - a list of ProtectionPolicySummary models mapped from the OCI SDK response. + a list of ProtectionPolicy models mapped from the OCI SDK response. """ try: client = get_recovery_client(region) - results: list[ProtectionPolicySummary] = [] + results: list[ProtectionPolicy] = [] has_next_page = True next_page: Optional[str] = page @@ -937,9 +1198,9 @@ def list_protection_policies( data = response.data items = getattr(data, "items", data) # collection.items or raw list for d in items: - s = map_protection_policy_summary(d) - if s is not None: - results.append(s) + pp = map_protection_policy(d) + if pp is not None: + results.append(pp) logger.info(f"Found {len(results)} Protection Policies") return results @@ -949,7 +1210,9 @@ def list_protection_policies( raise -@mcp.tool(description="Get a Protection Policy by OCID.") +@mcp.tool(description=( + "Gets a protection policy by OCID and returns it as a simple object." +)) def get_protection_policy( protection_policy_id: Annotated[str, "Protection Policy OCID"], opc_request_id: Annotated[ @@ -985,7 +1248,9 @@ def get_protection_policy( @mcp.tool( - description="List Recovery Service Subnets in a given compartment with optional filters." + description=( + "Lists recovery service subnets in a compartment with helpful filters. When needed, it fills in the list of associated subnets or uses the subnet_id as a fallback. The result is a simple list of subnets with the subnets list included when available." + ) ) def list_recovery_service_subnets( compartment_id: Annotated[str, "The OCID of the compartment"], @@ -1014,15 +1279,15 @@ def list_recovery_service_subnets( region: Annotated[ Optional[str], "OCI region to execute the request in (e.g., us-ashburn-1)" ] = None, -) -> list[RecoveryServiceSubnetSummary]: +) -> list[RecoveryServiceSubnet]: """ Paginates through Recovery Service to list Recovery Service Subnets and returns - a list of RecoveryServiceSubnetSummary models mapped from the OCI SDK response. + a list of RecoveryServiceSubnet models mapped from the OCI SDK response. 
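+    Example (hypothetical OCID): list_recovery_service_subnets(compartment_id="ocid1.compartment.oc1..example")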
""" try: client = get_recovery_client(region) - results: list[RecoveryServiceSubnetSummary] = [] + results: list[RecoveryServiceSubnet] = [] has_next_page = True next_page: Optional[str] = page @@ -1057,9 +1322,32 @@ def list_recovery_service_subnets( data = response.data items = getattr(data, "items", data) # collection.items or raw list for d in items: - s = map_recovery_service_subnet_summary(d) - if s is not None: - results.append(s) + rss = map_recovery_service_subnet(d) + if rss is None: + continue + # Enrich with subnets list if missing by fetching the full resource + try: + missing_subnets = getattr(rss, "subnets", None) is None + rss_id = getattr(rss, "id", None) + if missing_subnets and rss_id: + try: + g = client.get_recovery_service_subnet(recovery_service_subnet_id=rss_id) + full = map_recovery_service_subnet(getattr(g, "data", None)) + if full and getattr(full, "subnets", None): + rss.subnets = full.subnets + except Exception: + pass + except Exception: + pass + # Final fallback: if still missing, derive from subnet_id when available + try: + if getattr(rss, "subnets", None) is None: + sid = getattr(rss, "subnet_id", None) + if sid: + rss.subnets = [sid] + except Exception: + pass + results.append(rss) logger.info(f"Found {len(results)} Recovery Service Subnets") return results @@ -1069,7 +1357,9 @@ def list_recovery_service_subnets( raise -@mcp.tool(description="Get a Recovery Service Subnet by OCID.") +@mcp.tool(description=( + "Gets a recovery service subnet by OCID and makes sure the subnets list is present, using subnet_id if necessary. The result is one recovery service subnet." +)) def get_recovery_service_subnet( recovery_service_subnet_id: Annotated[str, "Recovery Service Subnet OCID"], opc_request_id: Annotated[ @@ -1096,6 +1386,14 @@ def get_recovery_service_subnet( data = response.data rss = map_recovery_service_subnet(data) + # Ensure subnets is populated even if service omits the array + try: + if getattr(rss, "subnets", None) is None: + sid = getattr(rss, "subnet_id", None) + if sid: + rss.subnets = [sid] + except Exception: + pass logger.info(f"Fetched Recovery Service Subnet {recovery_service_subnet_id}") return rss @@ -1104,7 +1402,9 @@ def get_recovery_service_subnet( raise -@mcp.tool +@mcp.tool(description=( + "Fetches Recovery Service metrics for a time range. You choose the metric, time step, and how to combine values, and you can limit it to one protected database. The result is a simple time series where each item has dimensions and a list of {timestamp, value} points." +)) def get_recovery_service_metrics( compartment_id: str, start_time: str, @@ -1177,9 +1477,7 @@ def get_recovery_service_metrics( @mcp.tool( description=( - "Gets a list of the databases in the specified Database Home. " - "If db_home_id is omitted, the tool will automatically look up all DB Homes in the given compartment " - "and aggregate results per DB Home." + "Lists databases in a DB Home or, if none is given, across all DB Homes in a compartment. It can find DB Homes for you, fills in backup settings only when needed, and, where possible, links each database to its protection policy. The result is a list of database summaries with optional backup settings and protection policy ID." ) ) def list_databases( @@ -1340,7 +1638,9 @@ def list_databases( raise -@mcp.tool(description="Retrieves full details for a Database by OCID.") +@mcp.tool(description=( + "Gets a database by OCID and returns an easy object. Where possible, it also links the database to its protection policy. 
The result is one database."
+))
 def get_database(
     database_id: Annotated[str, "OCID of the Database to retrieve."],
     region: Annotated[
@@ -1406,8 +1706,7 @@ def get_database(
 
 @mcp.tool(
     description=(
-        "Lists Database Backups with optional filters. "
-        "If neither database_id nor compartment_id is provided, defaults to tenancy compartment."
+        "Lists database backups with flexible filters and optional auto-paging. If database_id is provided, lists all backups for that database. If compartment_id is provided, finds AVAILABLE databases with auto-backup enabled and lists their backups. It includes manual, automatic, and long-term retention (LTR) backups. It adds helpful fields such as the backup destination and the database's unique name. The result is a list of easy-to-read backup summaries."
     )
 )
 def list_backups(
@@ -1421,82 +1720,330 @@ def list_backups(
     type: Annotated[
         Optional[str], "Backup type filter (e.g., INCREMENTAL, FULL)."
     ] = None,
-    limit: Annotated[Optional[int], "Maximum number of items per page."] = None,
-    page: Annotated[Optional[str], "Pagination token (opc-next-page)."] = None,
+    limit: Annotated[Optional[int], "Maximum number of items per backend page (when aggregate_pages=false)."] = None,
+    page: Annotated[Optional[str], "Pagination token (opc-next-page) when aggregate_pages=false."] = None,
     region: Annotated[
         Optional[str], "Canonical OCI region (e.g., us-ashburn-1)."
     ] = None,
+    aggregate_pages: Annotated[bool, "When true (default), retrieves all pages."] = True,
 ) -> list[BackupSummary]:
     try:
         client = get_database_client(region)
-        results: list[BackupSummary] = []
-        has_next = True
-        next_page = page
-        # If user didn't scope by DB or compartment, use a dummy compartment to avoid reading real OCI config
-        if not compartment_id and not database_id:
-            compartment_id = "ocid1.compartment.oc1..dummy"
-        while has_next:
-            # Build query filters
-            kwargs: dict = {"page": next_page}
-            if database_id:
-                kwargs["database_id"] = database_id
-            if compartment_id:
-                kwargs["compartment_id"] = compartment_id
-            if lifecycle_state:
-                kwargs["lifecycle_state"] = lifecycle_state
-            if type:
-                kwargs["type"] = type
-            if limit is not None:
-                kwargs["limit"] = limit
-            # Call list_backups and map summaries
-            resp = client.list_backups(**kwargs)
-            data = getattr(resp.data, "items", resp.data)
-            for it in data or []:
-                m = map_backup_summary(it)
-                if m is not None:
-                    results.append(m)
-            # Robust pagination guard: only continue if has_next_page is explicitly True
-            # and a concrete next_page token is present. This avoids infinite loops when
-            # tests use MagicMock/auto-specs that return truthy Mock objects. 
- _has_next_attr = getattr(resp, "has_next_page", False) - _next_page_attr = getattr(resp, "next_page", None) - has_next = (isinstance(_has_next_attr, bool) and _has_next_attr) and bool( - _next_page_attr - ) - next_page = _next_page_attr if has_next else None - return results + + def _to_dict(o): + try: + if hasattr(oci, "util") and hasattr(oci.util, "to_dict"): + d = oci.util.to_dict(o) + if isinstance(d, dict): + return d + except Exception: + pass + return getattr(o, "__dict__", {}) if hasattr(o, "__dict__") else {} + + def _is_auto_backup_enabled_from_dict(d: dict) -> bool: + cfg = None + for k in ("dbBackupConfig","db_backup_config","backupConfig","backup_config","databaseBackupConfig","database_backup_config"): + v = d.get(k) + if isinstance(v, dict): + cfg = v + break + src = cfg if isinstance(cfg, dict) else d + for key in ("isAutoBackupEnabled","is_auto_backup_enabled","autoBackupEnabled","auto_backup_enabled"): + if key in src and src[key] is not None: + return bool(src[key]) + return False + + def _list_all_backups_for_db(dbid: str) -> list[dict]: + out: list[dict] = [] + next_token = None + while True: + call_kwargs = {"database_id": dbid} + if lifecycle_state: + call_kwargs["lifecycle_state"] = lifecycle_state + if type: + call_kwargs["type"] = type + if not aggregate_pages: + if limit is not None: + call_kwargs["limit"] = limit + if page is not None and next_token is None: + call_kwargs["page"] = page + if next_token is not None: + call_kwargs["page"] = next_token + if "limit" not in call_kwargs or call_kwargs.get("limit") is None: + call_kwargs["limit"] = 1000 + resp = client.list_backups(**call_kwargs) + items = getattr(resp.data, "items", resp.data) or [] + raw_list = items if isinstance(items, list) else [items] + for obj in raw_list: + mapped = map_backup_summary(obj) + if mapped is None: + continue + try: + out_dict = mapped.model_dump(exclude_none=False, by_alias=True) + except Exception: + try: + out_dict = mapped.dict(exclude_none=False, by_alias=True) + except Exception: + out_dict = _to_dict(mapped) + + # Augment with raw SDK values for missing fields + try: + rawd = _to_dict(obj) + except Exception: + rawd = getattr(obj, "__dict__", {}) or {} + + def _pick(d: dict, *keys: str): + for k in keys: + if k in d and d[k] is not None: + return d[k] + return None + + if out_dict.get("database-size-in-gbs") is None: + ds = _pick(rawd, "database_size_in_gbs", "databaseSizeInGBs", "databaseSizeInGbs") + if ds is not None: + out_dict["database-size-in-gbs"] = ds + if out_dict.get("backup-destination-type") is None: + bdt = _pick(rawd, "backup_destination_type", "backupDestinationType") + if bdt is not None: + out_dict["backup-destination-type"] = bdt + if out_dict.get("retention-period-in-days") is None: + rpd = _pick(rawd, "retention_period_in_days", "retentionPeriodInDays") + if rpd is not None: + out_dict["retention-period-in-days"] = rpd + if out_dict.get("retention-period-in-years") is None: + rpy = _pick(rawd, "retention_period_in_years", "retentionPeriodInYears") + if rpy is not None: + out_dict["retention-period-in-years"] = rpy + + # Ensure CLI-style keys are present even when values are still null + for _k in ( + "database-size-in-gbs", + "backup-destination-type", + "retention-period-in-days", + "retention-period-in-years", + ): + if _k not in out_dict: + out_dict[_k] = None + + out.append(out_dict) + has_next = bool(getattr(resp, "has_next_page", False)) + next_token = getattr(resp, "next_page", None) if has_next else None + if not (aggregate_pages and has_next 
and next_token):
+                    break
+            return out
+
+        # Branch 1: database_id provided
+        if database_id:
+            backups = _list_all_backups_for_db(database_id)
+            # Fetch and set db_unique_name for this database
+            try:
+                gdb = client.get_database(database_id=database_id)
+                gdd = _to_dict(getattr(gdb, "data", None))
+                dun = gdd.get("dbUniqueName") or gdd.get("db_unique_name")
+            except Exception:
+                dun = None
+            for bk in backups:
+                if "db_unique_name" not in bk or bk["db_unique_name"] is None:
+                    bk["db_unique_name"] = dun
+            return backups
+
+        # Branch 2: compartment_id provided (region is optional)
+        if compartment_id:
+            # find DB Homes then list AVAILABLE databases
+            home_ids = _fetch_db_home_ids_for_compartment(compartment_id, region=region)
+            eligible_db_ids: list[str] = []
+            db_unique_cache: dict[str, Optional[str]] = {}
+            for hid in home_ids or []:
+                next_db_page = None
+                while True:
+                    kwargs_db = {
+                        "compartment_id": compartment_id,
+                        "db_home_id": hid,
+                        "lifecycle_state": "AVAILABLE",
+                        "limit": 1000,
+                    }
+                    if next_db_page:
+                        kwargs_db["page"] = next_db_page
+                    dresp = client.list_databases(**kwargs_db)
+                    ditems = getattr(dresp.data, "items", dresp.data) or []
+                    for d in ditems:
+                        d_dict = _to_dict(d)
+                        dbid = d_dict.get("id") or getattr(d, "id", None)
+                        dun = d_dict.get("dbUniqueName") or d_dict.get("db_unique_name") or getattr(d, "db_unique_name", None)
+                        is_auto = _is_auto_backup_enabled_from_dict(d_dict)
+                        if is_auto is False and dbid:
+                            # fallback to GET for authoritative value and db_unique_name
+                            try:
+                                g = client.get_database(database_id=dbid)
+                                gdd = _to_dict(getattr(g, "data", None))
+                                is_auto = _is_auto_backup_enabled_from_dict(gdd)
+                                if dun is None:
+                                    dun = gdd.get("dbUniqueName") or gdd.get("db_unique_name")
+                            except Exception:
+                                is_auto = False
+                        if dbid:
+                            if dun is not None:
+                                db_unique_cache[dbid] = dun
+                            if is_auto:
+                                eligible_db_ids.append(dbid)
+                    has_next = bool(getattr(dresp, "has_next_page", False))
+                    next_db_page = getattr(dresp, "next_page", None) if has_next else None
+                    if not has_next:
+                        break
+            # Aggregate backups for eligible DBs
+            all_results: list[dict] = []
+            for dbid in eligible_db_ids:
+                backups = _list_all_backups_for_db(dbid)
+                # Set db_unique_name from cache
+                for bk in backups:
+                    if "db_unique_name" not in bk or bk["db_unique_name"] is None:
+                        bk["db_unique_name"] = db_unique_cache.get(dbid)
+                all_results.extend(backups)
+            return all_results
+
+        # Neither database_id nor compartment_id provided
+        raise ValueError("Provide database_id or compartment_id.")
+
     except Exception as e:
-        logger.error(f"Error in list_backups tool: {e}")
+        logger.error("Error in list_backups tool: %s", e)
         raise
 
 
-@mcp.tool(description="Retrieves a Database Backup by OCID.")
+@mcp.tool(description=(
+    "Gets a database backup by OCID and returns a clean dictionary. It includes common fields like database size, backup destination, and the database's unique name. The result is one backup with those helpful fields included."
+))
 def get_backup(
     backup_id: Annotated[str, "OCID of the Backup to retrieve."],
     region: Annotated[
         Optional[str], "Canonical OCI region (e.g., us-ashburn-1)."
    ] = None,
 ) -> Backup:
+    """
+    Retrieves a Database Backup by OCID and maps it to the server model.
+    Enriches the result with database size, backup destination type, retention, and db_unique_name when available. 
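+    Example (hypothetical OCID): get_backup(backup_id="ocid1.backup.oc1..example")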
+ """ try: client = get_database_client(region) resp = client.get_backup(backup_id=backup_id) - return map_backup(resp.data) + mapped = map_backup(resp.data) + try: + out = mapped.model_dump(exclude_none=False, by_alias=True) + except Exception: + try: + out = mapped.dict(exclude_none=False, by_alias=True) + except Exception: + out = getattr(mapped, "__dict__", {}) or {} + # Try to augment from raw SDK object dict if mapping missed fields + try: + rawd = ( + oci.util.to_dict(resp.data) + if hasattr(oci, "util") and hasattr(oci.util, "to_dict") + else (getattr(resp.data, "__dict__", {}) or {}) + ) + except Exception: + rawd = getattr(resp.data, "__dict__", {}) or {} + + def _pick(d: dict, *keys: str): + for k in keys: + if k in d and d[k] is not None: + return d[k] + return None + + if out.get("database-size-in-gbs") is None: + ds = _pick(rawd, "database_size_in_gbs", "databaseSizeInGBs", "databaseSizeInGbs") + if ds is not None: + out["database-size-in-gbs"] = ds + if out.get("backup-destination-type") is None: + bdt = _pick(rawd, "backup_destination_type", "backupDestinationType") + if bdt is not None: + out["backup-destination-type"] = bdt + if out.get("retention-period-in-days") is None: + rpd = _pick(rawd, "retention_period_in_days", "retentionPeriodInDays") + if rpd is not None: + out["retention-period-in-days"] = rpd + if out.get("retention-period-in-years") is None: + rpy = _pick(rawd, "retention_period_in_years", "retentionPeriodInYears") + if rpy is not None: + out["retention-period-in-years"] = rpy + + # Infer destination from DB backup config if still missing (no Recovery Service calls) + try: + dbid = out.get("database_id") or rawd.get("databaseId") + if (out.get("backup-destination-type") is None) and dbid: + gdb = client.get_database(database_id=dbid) + gdd = oci.util.to_dict(getattr(gdb, "data", None)) if hasattr(oci, "util") and hasattr(oci.util, "to_dict") else (getattr(getattr(gdb, "data", None), "__dict__", {}) or {}) + cfg = gdd.get("dbBackupConfig") or gdd.get("db_backup_config") or gdd.get("databaseBackupConfig") + details = None + if isinstance(cfg, dict): + details = cfg.get("backupDestinationDetails") or cfg.get("backup_destination_details") + if not details: + details = gdd.get("backupDestinationDetails") or gdd.get("backup_destination_details") + det_list = details if isinstance(details, list) else ([details] if details else []) + types = [] + for det in det_list: + dd = det if isinstance(det, dict) else (oci.util.to_dict(det) if hasattr(oci, "util") and hasattr(oci.util, "to_dict") else det.__dict__ if hasattr(det, "__dict__") else {}) + t = (dd or {}).get("type") or (dd or {}).get("destinationType") + tnorm = (str(t).upper() if t else None) + if tnorm in ("RECOVERY_SERVICE", "RECOVERY-SERVICE", "DBRS", "RECOVERY_SERVICE_BACKUP_DESTINATION"): + types.append("DBRS") + elif tnorm in ("OBJECT_STORE", "OBJECTSTORE", "OBJECT_STORAGE"): + types.append("OBJECT_STORE") + elif tnorm in ("NFS",): + types.append("NFS") + if "DBRS" in types: + out["backup-destination-type"] = "DBRS" + elif "OBJECT_STORE" in types: + out["backup-destination-type"] = "OBJECT_STORE" + elif "NFS" in types: + out["backup-destination-type"] = "NFS" + except Exception: + pass + + # Ensure db_unique_name on model and output + try: + dbid = out.get("database_id") or rawd.get("databaseId") or rawd.get("database_id") + if dbid: + try: + gdb = client.get_database(database_id=dbid) + gdd = ( + oci.util.to_dict(getattr(gdb, "data", None)) + if hasattr(oci, "util") and hasattr(oci.util, "to_dict") + else 
(getattr(getattr(gdb, "data", None), "__dict__", {}) or {}) + ) + dun = gdd.get("dbUniqueName") or gdd.get("db_unique_name") + try: + if getattr(mapped, "db_unique_name", None) is None: + mapped.db_unique_name = dun + except Exception: + pass + if dun is not None: + out["db_unique_name"] = dun + except Exception: + pass + except Exception: + pass + + # Ensure CLI-style keys are present even when values are still null + for _k in ( + "database-size-in-gbs", + "backup-destination-type", + "retention-period-in-days", + "retention-period-in-years", + ): + if _k not in out: + out[_k] = None + return out except Exception as e: - logger.error(f"Error in get_backup tool: {e}") + logger.error("Error in get_backup tool: %s", e) raise @mcp.tool( description=( - "Summarizes Database backup configuration and destinations " - "for databases in a compartment or DB Home. " - "Reports counts by destination type (e.g., DBRS, OBJECT_STORE, NFS), " - "number unconfigured, and per-DB details. " - "If db_home_id is omitted, the tool automatically discovers all DB Homes " - "in the compartment and aggregates per-home." + "Summarizes how databases in a compartment or DB Home are backed up. It can find DB Homes, looks at each database’s backup settings, can include the time of the most recent backup, and groups results by destination type while calling out databases that aren’t configured. The result is one summary object with counts, name lists, and per‑database details." ) ) -def summarize_protected_database_backup_destination( +def summarise_protected_database_backup_destination( compartment_id: Annotated[ Optional[str], "OCID of the compartment. If omitted, defaults to the tenancy/DEFAULT profile.", @@ -1511,64 +2058,69 @@ def summarize_protected_database_backup_destination( include_last_backup_time: Annotated[ bool, "If true, compute last backup time per DB (extra API calls)." ] = False, + db_name: Annotated[ + Optional[str], "Exact database name filter (case-insensitive)." + ] = None, + limit_per_home: Annotated[ + Optional[int], "Max databases to fetch per DB Home." + ] = None, + max_db_homes: Annotated[ + Optional[int], "Max number of DB Homes to scan." + ] = None, + max_total_databases: Annotated[ + Optional[int], "Global cap on databases to scan." 
+ ] = None, ) -> ProtectedDatabaseBackupDestinationSummary: try: db_client = get_database_client(region) - rec_client = get_recovery_client(region) if not compartment_id: compartment_id = get_tenancy() - # Discover DB Homes if not specified + # Discover DB Homes if not specified, then list databases with lifecycle_state=AVAILABLE home_ids: list[str] = ( [db_home_id] if db_home_id else _fetch_db_home_ids_for_compartment(compartment_id, region=region) ) - # Collect database summaries for those DB Homes (AVAILABLE only) + # Explicitly bind the SDK method to avoid any accidental reference to the MCP tool + list_dbs_method = getattr(db_client, "list_databases") db_summaries: list[Any] = [] if home_ids: - for hid in home_ids: - resp = db_client.list_databases( - compartment_id=compartment_id, - db_home_id=hid, - lifecycle_state="AVAILABLE", - ) - data = getattr(resp.data, "items", resp.data) - if isinstance(data, list): - db_summaries.extend(data) - elif data is not None: - db_summaries.append(data) + for hid in (home_ids[:max_db_homes] if (max_db_homes is not None) else home_ids): + call_kwargs = { + "compartment_id": compartment_id, + "db_home_id": hid, + "lifecycle_state": "AVAILABLE", + } + if db_name is not None: + call_kwargs["db_name"] = db_name + if limit_per_home is not None: + call_kwargs["limit"] = limit_per_home + next_page = None + while True: + local_kwargs = dict(call_kwargs) + if next_page: + local_kwargs["page"] = next_page + resp = list_dbs_method(**local_kwargs) + data = getattr(resp.data, "items", resp.data) + if isinstance(data, list): + db_summaries.extend(data) + elif data is not None: + db_summaries.append(data) + if max_total_databases is not None and len(db_summaries) >= max_total_databases: + db_summaries = db_summaries[:max_total_databases] + break + has_next = bool(getattr(resp, "has_next_page", False)) + next_page = getattr(resp, "next_page", None) if has_next else None + if not has_next: + break # Build a map of database_id -> list of Protected Databases (from Recovery Service) # This allows us to infer DBRS (Recovery Service) as a destination type even if DB backup config # does not explicitly list it as a destination, by virtue of PD linkage. 
+ # Simplified: do not correlate via Recovery Protected Databases pd_by_dbid: dict[str, list[dict]] = {} - try: - has_next = True - next_page = None - while has_next: - lp = rec_client.list_protected_databases( - compartment_id=compartment_id, page=next_page - ) - has_next = lp.has_next_page - next_page = getattr(lp, "next_page", None) - pdata = lp.data - pitems = getattr(pdata, "items", pdata) - for it in pitems or []: - # convert to dict for easy field access - try: - if hasattr(oci, "util") and hasattr(oci.util, "to_dict"): - d = oci.util.to_dict(it) - else: - d = getattr(it, "__dict__", {}) or {} - except Exception: - d = getattr(it, "__dict__", {}) or {} - dbid = d.get("databaseId") or d.get("database_id") - if dbid: - pd_by_dbid.setdefault(dbid, []).append(d) - except Exception: - pd_by_dbid = {} # Helper routines to normalize SDK objects and read fields across variants def _to_dict(o: Any) -> dict: @@ -1633,7 +2185,7 @@ def _normalize_dest_type(t: Optional[str]) -> str: ): return "DBRS" if u in ("OBJECT_STORE", "OBJECTSTORE", "OBJECT_STORAGE"): - return "OSS" + return "OBJECT_STORE" if u in ("NFS",): return "NFS" return u @@ -1712,9 +2264,28 @@ def _read_backup_times_from_obj(o: Any) -> list[Any]: continue db_name = _get(s, "db_name", "dbName") - dresp = get_db(database_id=sid) - d_obj = getattr(dresp, "data", None) - d_dict = _to_dict(d_obj) + # Prefer backup config from summary item to avoid per-DB GET when possible + d_obj = None + d_dict = _to_dict(s) + cfg_present = False + try: + cfg_present = any( + isinstance(d_dict.get(k), dict) + for k in ( + "dbBackupConfig", + "db_backup_config", + "databaseBackupConfig", + "database_backup_config", + "backupConfig", + "backup_config", + ) + ) + except Exception: + cfg_present = False + if not cfg_present: + dresp = get_db(database_id=sid) + d_obj = getattr(dresp, "data", None) + d_dict = _to_dict(d_obj) # Extract configured destination details (normalize to a list of dicts) dest_details = _extract_backup_destination_details(d_dict) @@ -1737,26 +2308,17 @@ def _read_backup_times_from_obj(o: Any) -> list[Any]: # Augment with Recovery Service protected database linkage pds_for_db = pd_by_dbid.get(sid, []) - if pds_for_db: - dest_types.append("DBRS") - try: - # Use PD OCID for reference if present - dest_ids.append(pds_for_db[0].get("id")) - except Exception: - pass + # Simplified: do not infer DBRS from Protected Database linkage - # Deduplicate destinations and IDs - dest_types = list(dict.fromkeys([t for t in dest_types if t])) - # Enforce exclusivity between DBRS and OSS: - # prefer DBRS (no dual classification) - if "DBRS" in dest_types and "OSS" in dest_types: + # Deduplicate and restrict to DBRS/OBJECT_STORE; prefer DBRS if both + dest_types = list(dict.fromkeys([t for t in dest_types if t in ("DBRS", "OBJECT_STORE")])) + if "DBRS" in dest_types and "OBJECT_STORE" in dest_types: dest_types = ["DBRS"] dest_ids = list(dict.fromkeys([d for d in dest_ids if d])) auto_enabled = _is_auto_backup_enabled(d_dict) - # Consider configured if auto-backup enabled OR any destination types - # detected (incl. 
DBRS via Recovery Service) - configured = bool(auto_enabled or len(dest_types) > 0) + # Configured strictly when auto-backup is enabled + configured = bool(auto_enabled) status = "CONFIGURED" if configured else "UNCONFIGURED" last_backup_time = None @@ -1777,34 +2339,19 @@ def _read_backup_times_from_obj(o: Any) -> list[Any]: best = t if best is not None: last_backup_time = best - if status != "CONFIGURED": - status = "HAS_BACKUPS" except Exception: pass else: - # Lightweight existence check for backups - try: - b_resp = list_bk(database_id=sid, limit=1) - b_data = getattr(b_resp.data, "items", b_resp.data) - has_any = ( - (len(b_data) > 0) - if isinstance(b_data, list) - else (b_data is not None) - ) - if status != "CONFIGURED" and has_any: - status = "HAS_BACKUPS" - except Exception: - pass + pass # Aggregate summary counters and name lists by status/destination name_for_lists = db_name or sid if status == "CONFIGURED": - for ut in set(dest_types): - if ut != "UNKNOWN": - counts_by_type[ut] = counts_by_type.get(ut, 0) + 1 - db_names_by_type.setdefault(ut, []).append(name_for_lists) - elif status == "HAS_BACKUPS": - has_backups_names.append(name_for_lists) + # Select a single effective destination type: DBRS preferred over OBJECT_STORE + eff_type = "DBRS" if "DBRS" in dest_types else ("OBJECT_STORE" if "OBJECT_STORE" in dest_types else "UNKNOWN") + if eff_type in ("DBRS", "OBJECT_STORE"): + counts_by_type[eff_type] = counts_by_type.get(eff_type, 0) + 1 + db_names_by_type.setdefault(eff_type, []).append(name_for_lists) else: unconfigured += 1 unconfigured_names.append(name_for_lists) @@ -1824,23 +2371,17 @@ def _read_backup_times_from_obj(o: Any) -> list[Any]: # Continue on per-DB errors to maximize overall coverage continue - # Sorting helpers: prioritize DBRS over OSS/NFS, then by status, then by name + # Sorting helpers: prioritize DBRS over OBJECT_STORE and then by name def _dest_rank(types: list[str]) -> int: if not types: return 99 - order = {"DBRS": 0, "OSS": 1, "NFS": 2, "UNKNOWN": 3} + order = {"DBRS": 0, "OBJECT_STORE": 1, "NFS": 2, "UNKNOWN": 3} return min(order.get(t, 3) for t in types) - def _status_rank(st: Optional[str]) -> int: - return {"CONFIGURED": 0, "HAS_BACKUPS": 1, "UNCONFIGURED": 2}.get( - (st or "").upper(), 3 - ) - items = sorted( items, key=lambda it: ( _dest_rank(it.destination_types), - _status_rank(it.status), (it.db_name or ""), ), ) @@ -1870,11 +2411,15 @@ def _sorted_keep(xs: list[str]) -> list[str]: ) except Exception as e: logger.error( - f"Error in summarize_protected_database_backup_destination tool: {e}" + f"Error in summarise_protected_database_backup_destination tool: {e}" ) raise - +@mcp.tool( + description=( + "Lists database homes in a compartment with optional lifecycle filters, defaulting to your tenancy when no compartment is given, and handles paging for you. The result is a list of database home summaries." + ) +) def list_db_homes( compartment_id: Annotated[ Optional[str], "OCID of the compartment to scope the search." @@ -1918,7 +2463,9 @@ def list_db_homes( raise -@mcp.tool(description="Retrieves a single Database Home by OCID.") +@mcp.tool(description=( + "Gets a database home by OCID and returns it as a simple object. The result is one database home." +)) def get_db_home( db_home_id: Annotated[str, "OCID of the DB Home to retrieve."], region: Annotated[ @@ -1936,8 +2483,7 @@ def get_db_home( @mcp.tool( description=( - "Lists Database Systems in the specified compartment with optional lifecycle filters. 
" - "If compartment_id is omitted, defaults to tenancy compartment." + "Lists database systems in a compartment with optional lifecycle filters, defaulting to your tenancy when no compartment is given, and handles paging for you. The result is a list of database system summaries." ) ) def list_db_systems( @@ -1980,7 +2526,9 @@ def list_db_systems( raise -@mcp.tool(description="Retrieves a single Database System by OCID.") +@mcp.tool(description=( + "Gets a database system by OCID and returns it as a convenient object. The result is one database system." +)) def get_db_system( db_system_id: Annotated[str, "OCID of the DB System to retrieve."], region: Annotated[ From b8758d69d56ff368d5203b1b678e1664df5266fe Mon Sep 17 00:00:00 2001 From: hagavisi Date: Wed, 4 Feb 2026 17:16:40 +0530 Subject: [PATCH 09/11] Fix testcases and lint issues --- .../oracle/oci_recovery_mcp_server/models.py | 92 +++-- .../oracle/oci_recovery_mcp_server/server.py | 381 +++++++++++++----- .../tests/test_recovery_database_tools.py | 103 ++--- .../tests/test_recovery_tools.py | 36 +- src/oci-recovery-mcp-server/pyproject.toml | 1 + 5 files changed, 394 insertions(+), 219 deletions(-) diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py index 1c2af50a..17c13792 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py @@ -384,21 +384,28 @@ def map_protected_database( database_id=getattr(pd, "database_id", None) or data.get("database_id") or data.get("databaseId"), + db_unique_name=getattr(pd, "db_unique_name", None) + or data.get("db_unique_name"), + vpc_user_name=getattr(pd, "vpc_user_name", None) or data.get("vpc_user_name"), database_size=getattr(pd, "database_size", None) or data.get("database_size") or data.get("databaseSize"), - database_size_in_gbs=getattr(pd, "database_size_in_gbs", None) - or data.get("database_size_in_gbs") - or data.get("databaseSizeInGBs") - or data.get("databaseSizeInGbs"), - change_rate=getattr(pd, "change_rate", None) - or data.get("change_rate") - or data.get("changeRate"), - compression_ratio=getattr(pd, "compression_ratio", None) - or data.get("compression_ratio") - or data.get("compressionRatio"), - db_unique_name=getattr(pd, "db_unique_name", None) - or data.get("db_unique_name"), + database_size_in_gbs=( + getattr(pd, "database_size_in_gbs", None) + or data.get("database_size_in_gbs") + or data.get("databaseSizeInGBs") + or data.get("databaseSizeInGbs") + ), + change_rate=( + getattr(pd, "change_rate", None) + or data.get("change_rate") + or data.get("changeRate") + ), + compression_ratio=( + getattr(pd, "compression_ratio", None) + or data.get("compression_ratio") + or data.get("compressionRatio") + ), lifecycle_state=getattr(pd, "lifecycle_state", None) or data.get("lifecycle_state"), lifecycle_details=getattr(pd, "lifecycle_details", None) @@ -411,9 +418,11 @@ def map_protected_database( or data.get("is_read_only_resource") or data.get("isReadOnlyResource"), health=getattr(pd, "health", None) or data.get("health"), - is_redo_logs_shipped=getattr(pd, "is_redo_logs_shipped", None) - or data.get("is_redo_logs_shipped") - or data.get("isRedoLogsShipped"), + is_redo_logs_shipped=( + getattr(pd, "is_redo_logs_shipped", None) + or data.get("is_redo_logs_shipped") + or data.get("isRedoLogsShipped") + ), metrics=map_metrics(getattr(pd, "metrics", None) or data.get("metrics")), 
subscription_id=getattr(pd, "subscription_id", None) or data.get("subscription_id") @@ -433,8 +442,6 @@ def map_protected_database( system_tags=getattr(pd, "system_tags", None) or data.get("system_tags") or data.get("systemTags"), - vpc_user_name=getattr(pd, "vpc_user_name", None) - or data.get("vpc_user_name"), ) @@ -526,7 +533,12 @@ def _normalize_subnets(val): if isinstance(it, str): out.append(it) elif isinstance(it, dict): - ocid = it.get("id") or it.get("ocid") or it.get("subnetId") or it.get("subnet_id") + ocid = ( + it.get("id") + or it.get("ocid") + or it.get("subnetId") + or it.get("subnet_id") + ) if ocid: out.append(ocid) except Exception: @@ -535,8 +547,12 @@ def _normalize_subnets(val): # Normalize primary identifiers for VCN/subnet and ensure 'subnets' includes subnet_id when list is absent vcn_id_val = getattr(rss, "vcn_id", None) or data.get("vcn_id") or data.get("vcnId") - subnet_id_val = getattr(rss, "subnet_id", None) or data.get("subnet_id") or data.get("subnetId") - subnets_val = _normalize_subnets(data.get("subnets")) or ([subnet_id_val] if subnet_id_val else None) + subnet_id_val = ( + getattr(rss, "subnet_id", None) or data.get("subnet_id") or data.get("subnetId") + ) + subnets_val = _normalize_subnets(data.get("subnets")) or ( + [subnet_id_val] if subnet_id_val else None + ) return RecoveryServiceSubnet( id=getattr(rss, "id", None) or data.get("id"), @@ -731,9 +747,11 @@ def map_protected_database_summary( database_id=getattr(pds, "database_id", None) or data.get("database_id") or data.get("databaseId"), - db_unique_name=getattr(pds, "db_unique_name", None) - or data.get("db_unique_name") - or data.get("dbUniqueName"), + db_unique_name=( + getattr(pds, "db_unique_name", None) + or data.get("db_unique_name") + or data.get("dbUniqueName") + ), vpc_user_name=getattr(pds, "vpc_user_name", None) or data.get("vpc_user_name") or data.get("vpcUserName"), @@ -1140,9 +1158,7 @@ def map_metrics(m) -> Metrics | None: retention_period_in_days=getattr(m, "retention_period_in_days", None) or data.get("retention_period_in_days") or data.get("retentionPeriodInDays"), - unprotected_window_in_seconds=getattr( - m, "unprotected_window_in_seconds", None - ) + unprotected_window_in_seconds=getattr(m, "unprotected_window_in_seconds", None) or data.get("unprotected_window_in_seconds") or data.get("unprotectedWindowInSeconds"), ) @@ -1854,22 +1870,28 @@ class BackupSummary(OCIBaseModel): retention_period_in_days: Optional[float] = Field( None, alias="retention-period-in-days", - description="Retention period (days) inferred from Recovery protection policy, when available." + description="Retention period (days) inferred from Recovery protection policy, when available.", ) retention_period_in_years: Optional[float] = Field( None, alias="retention-period-in-years", - description="Retention period (years), derived from days when available." + description="Retention period (years), derived from days when available.", ) database_size_in_gbs: Optional[float] = Field( None, alias="database-size-in-gbs", - description="Database size in GBs (from Recovery metrics) for the database that this backup belongs to." + description=( + "Database size in GBs (from Recovery metrics) for the database " + "that this backup belongs to." + ), ) backup_destination_type: Optional[str] = Field( None, alias="backup-destination-type", - description="Primary backup destination type for the database (e.g., DBRS, OBJECT_STORE, NFS, UNKNOWN)." 
+ description=( + "Primary backup destination type for the database " + "(e.g., DBRS, OBJECT_STORE, NFS, UNKNOWN)." + ), ) @@ -1950,12 +1972,18 @@ class Backup(OCIBaseModel): database_size_in_gbs: Optional[float] = Field( None, alias="database-size-in-gbs", - description="Database size in GBs (from Recovery metrics) for the database that this backup belongs to.", + description=( + "Database size in GBs (from Recovery metrics) for the database " + "that this backup belongs to." + ), ) backup_destination_type: Optional[str] = Field( None, alias="backup-destination-type", - description="Primary backup destination type for the database (e.g., DBRS, OBJECT_STORE, NFS, UNKNOWN).", + description=( + "Primary backup destination type for the database " + "(e.g., DBRS, OBJECT_STORE, NFS, UNKNOWN)." + ), ) diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py index 7a39c377..f1b0ea88 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py @@ -59,9 +59,7 @@ ProtectedDatabaseRedoCounts, ProtectedDatabaseSummary, ProtectionPolicy, - ProtectionPolicySummary, RecoveryServiceSubnet, - RecoveryServiceSubnetSummary, map_backup, map_backup_summary, map_database, @@ -74,10 +72,8 @@ map_protected_database, map_protected_database_summary, map_protection_policy, - map_protection_policy_summary, - map_recovery_service_subnet_details, map_recovery_service_subnet, - map_recovery_service_subnet_summary, + map_recovery_service_subnet_details, ) from . import __project__, __version__ @@ -98,7 +94,7 @@ - get_database - list_backups - get_backup -- summarise_protected_database_backup_destination +- summarize_protected_database_backup_destination - get_db_home - list_db_systems - get_db_system @@ -310,7 +306,6 @@ def _fetch_db_home_ids_for_compartment( """ try: client = get_database_client(region) - rec_client = get_recovery_client(region) resp = client.list_db_homes(compartment_id=compartment_id) data = resp.data # Normalize list shape (SDK may use .items or a raw list) @@ -355,9 +350,13 @@ def get_compartment_by_name(compartment_name: str): return None -@mcp.tool(description=( - "Finds a compartment by name. It searches all accessible compartments in your tenancy (including the root) without worrying about letter case, and returns the match as JSON or a clear error if none is found." -)) +@mcp.tool( + description=( + "Finds a compartment by name. It searches all accessible compartments in your " + "tenancy (including the root) without worrying about letter case, and returns " + "the match as JSON or a clear error if none is found." + ) +) def get_compartment_by_name_tool(name: str) -> str: """Return a compartment matching the provided name""" compartment = get_compartment_by_name(name) @@ -369,7 +368,10 @@ def get_compartment_by_name_tool(name: str) -> str: @mcp.tool( description=( - "Lists protected databases in a compartment with optional filters. For each database it also includes Recovery Service Subnet details, removes noisy fields, and adds basic per‑database metrics. The result is a list of simple dictionaries, each with cleaned subnet information and a small metrics map." + "Lists protected databases in a compartment with optional filters. For each " + "database it also includes Recovery Service Subnet details, removes noisy " + "fields, and adds basic per‑database metrics. 
The result is a list of simple " + "dictionaries, each with cleaned subnet information and a small metrics map." ) ) def list_protected_databases( @@ -483,11 +485,15 @@ def list_protected_databases( ) if needs_enrich: try: - rss_resp: oci.response.Response = client.get_recovery_service_subnet( - recovery_service_subnet_id=rss_id + rss_resp: oci.response.Response = ( + client.get_recovery_service_subnet( + recovery_service_subnet_id=rss_id + ) ) full_rss = rss_resp.data - mapped_det = map_recovery_service_subnet_details(full_rss) + mapped_det = map_recovery_service_subnet_details( + full_rss + ) enriched.append(mapped_det or det) except Exception: enriched.append(det) @@ -526,7 +532,9 @@ def list_protected_databases( pdid = pd_dict.get("id") or getattr(pd_summary, "id", None) if pdid: try: - g = client.get_protected_database(protected_database_id=pdid) + g = client.get_protected_database( + protected_database_id=pdid + ) full_pd = map_protected_database(getattr(g, "data", None)) mobj = getattr(full_pd, "metrics", None) md = None @@ -545,14 +553,28 @@ def _pick(d: dict | None, key: str): return d.get(key) metrics_out = { - "backup-space-estimate-in-gbs": _pick(md, "backup_space_estimate_in_gbs"), - "backup-space-used-in-gbs": _pick(md, "backup_space_used_in_gbs"), - "current-retention-period-in-seconds": _pick(md, "current_retention_period_in_seconds"), + "backup-space-estimate-in-gbs": _pick( + md, "backup_space_estimate_in_gbs" + ), + "backup-space-used-in-gbs": _pick( + md, "backup_space_used_in_gbs" + ), + "current-retention-period-in-seconds": _pick( + md, "current_retention_period_in_seconds" + ), "db-size-in-gbs": _pick(md, "database_size_in_gbs"), - "is-redo-logs-enabled": _pick(md, "is_redo_logs_enabled"), - "minimum-recovery-needed-in-days": _pick(md, "minimum_recovery_needed_in_days"), - "retention-period-in-days": _pick(md, "retention_period_in_days"), - "unprotected-window-in-seconds": _pick(md, "unprotected_window_in_seconds"), + "is-redo-logs-enabled": _pick( + md, "is_redo_logs_enabled" + ), + "minimum-recovery-needed-in-days": _pick( + md, "minimum_recovery_needed_in_days" + ), + "retention-period-in-days": _pick( + md, "retention_period_in_days" + ), + "unprotected-window-in-seconds": _pick( + md, "unprotected_window_in_seconds" + ), } pd_dict["metrics"] = metrics_out except Exception: @@ -573,7 +595,10 @@ def _pick(d: dict | None, key: str): @mcp.tool( description=( - "Gets a protected database by OCID and presents a clean, easy‑to‑read view. It includes Recovery Service Subnet details, hides noisy fields, and adds core metrics. The result is one protected database as a plain dictionary with subnet info and a simple metrics section." + "Gets a protected database by OCID and presents a clean, easy‑to‑read view. " + "It includes Recovery Service Subnet details, hides noisy fields, and adds " + "core metrics. The result is one protected database as a plain dictionary " + "with subnet info and a simple metrics section." 
) ) def get_protected_database( @@ -626,8 +651,10 @@ def get_protected_database( ) if needs_enrich: try: - rss_resp: oci.response.Response = client.get_recovery_service_subnet( - recovery_service_subnet_id=rss_id + rss_resp: oci.response.Response = ( + client.get_recovery_service_subnet( + recovery_service_subnet_id=rss_id + ) ) full_rss = rss_resp.data mapped_det = map_recovery_service_subnet_details(full_rss) @@ -685,7 +712,8 @@ def get_protected_database( cleaned_rss.append(d) pd_dict["recovery_service_subnets"] = cleaned_rss - # Normalize metrics to OCI CLI style keys using only values present on PD.metrics (no derivations/fallbacks) + # Normalize metrics to OCI CLI style keys using only values present on + # PD.metrics (no derivations/fallbacks) metrics_obj = getattr(pd, "metrics", None) metrics_dict = None if metrics_obj is not None: @@ -703,14 +731,22 @@ def _pick(d: dict | None, key: str): return d.get(key) metrics_out = { - "backup-space-estimate-in-gbs": _pick(metrics_dict, "backup_space_estimate_in_gbs"), + "backup-space-estimate-in-gbs": _pick( + metrics_dict, "backup_space_estimate_in_gbs" + ), "backup-space-used-in-gbs": _pick(metrics_dict, "backup_space_used_in_gbs"), - "current-retention-period-in-seconds": _pick(metrics_dict, "current_retention_period_in_seconds"), + "current-retention-period-in-seconds": _pick( + metrics_dict, "current_retention_period_in_seconds" + ), "db-size-in-gbs": _pick(metrics_dict, "database_size_in_gbs"), "is-redo-logs-enabled": _pick(metrics_dict, "is_redo_logs_enabled"), - "minimum-recovery-needed-in-days": _pick(metrics_dict, "minimum_recovery_needed_in_days"), + "minimum-recovery-needed-in-days": _pick( + metrics_dict, "minimum_recovery_needed_in_days" + ), "retention-period-in-days": _pick(metrics_dict, "retention_period_in_days"), - "unprotected-window-in-seconds": _pick(metrics_dict, "unprotected_window_in_seconds"), + "unprotected-window-in-seconds": _pick( + metrics_dict, "unprotected_window_in_seconds" + ), } pd_dict["metrics"] = metrics_out @@ -723,7 +759,10 @@ def _pick(d: dict | None, key: str): @mcp.tool( description=( - "Shows how many protected databases are healthy, warning, alert, or unknown in a compartment. If a quick list doesn’t include health, it checks each database to fill it in. The result is a small JSON with the counts, the compartmentId, and the region." + "Shows how many protected databases are healthy, warning, alert, or unknown " + "in a compartment. If a quick list doesn’t include health, it checks each " + "database to fill it in. The result is a small JSON with the counts, the " + "compartmentId, and the region." ) ) def summarize_protected_database_health( @@ -862,7 +901,10 @@ def summarize_protected_database_health( @mcp.tool( description=( - "Shows how many protected databases have redo transport turned on or off in a compartment. It reads the main setting and uses a fallback when needed. The result is a simple JSON with enabled, disabled, total, the compartmentId, and the region." + "Shows how many protected databases have redo transport turned on or off in " + "a compartment. It reads the main setting and uses a fallback when needed. " + "The result is a simple JSON with enabled, disabled, total, the compartmentId, " + "and the region." 
) ) def summarize_protected_database_redo_status( @@ -987,7 +1029,11 @@ def summarize_protected_database_redo_status( @mcp.tool( description=( - "Adds up the backup space (in GB) used by protected databases in a compartment, including only those with lifecycle state ACTIVE or DELETE_SCHEDULED (excluding DELETED). It reads each database’s metrics and also tells you how many databases were checked. The result is a small JSON with the compartmentId, region, totalDatabasesScanned, and the total space in GB." + "Adds up the backup space (in GB) used by protected databases in a compartment, " + "including only those with lifecycle state ACTIVE or DELETE_SCHEDULED (excluding " + "DELETED). It reads each database’s metrics and also tells you how many databases " + "were checked. The result is a small JSON with the compartmentId, region, " + "totalDatabasesScanned, and the total space in GB." ) ) def summarize_backup_space_used( @@ -1031,11 +1077,14 @@ def summarize_backup_space_used( items = getattr(data, "items", data) for item in items or []: - # Filter by lifecycle state: include only ACTIVE or DELETE_SCHEDULED (exclude DELETED and others) + # Filter by lifecycle state: include only ACTIVE or DELETE_SCHEDULED + # (exclude DELETED and others) try: lifecycle_state = getattr(item, "lifecycle_state", None) if not lifecycle_state and hasattr(item, "__dict__"): - lifecycle_state = (getattr(item, "__dict__", {}) or {}).get("lifecycle_state") or (getattr(item, "__dict__", {}) or {}).get("lifecycleState") + lifecycle_state = (getattr(item, "__dict__", {}) or {}).get( + "lifecycle_state" + ) or (getattr(item, "__dict__", {}) or {}).get("lifecycleState") except Exception: lifecycle_state = None if lifecycle_state not in ("ACTIVE", "DELETE_SCHEDULED"): @@ -1132,7 +1181,8 @@ def summarize_backup_space_used( @mcp.tool( description=( - "Lists protection policies in a compartment with handy filters and automatic paging. The result is a straightforward list of protection policies." + "Lists protection policies in a compartment with handy filters and automatic " + "paging. The result is a straightforward list of protection policies." ) ) def list_protection_policies( @@ -1210,9 +1260,9 @@ def list_protection_policies( raise -@mcp.tool(description=( - "Gets a protection policy by OCID and returns it as a simple object." -)) +@mcp.tool( + description=("Gets a protection policy by OCID and returns it as a simple object.") +) def get_protection_policy( protection_policy_id: Annotated[str, "Protection Policy OCID"], opc_request_id: Annotated[ @@ -1249,7 +1299,10 @@ def get_protection_policy( @mcp.tool( description=( - "Lists recovery service subnets in a compartment with helpful filters. When needed, it fills in the list of associated subnets or uses the subnet_id as a fallback. The result is a simple list of subnets with the subnets list included when available." + "Lists recovery service subnets in a compartment with helpful filters. When " + "needed, it fills in the list of associated subnets or uses the subnet_id as " + "a fallback. The result is a simple list of subnets with the subnets list " + "included when available." 
) ) def list_recovery_service_subnets( @@ -1331,7 +1384,9 @@ def list_recovery_service_subnets( rss_id = getattr(rss, "id", None) if missing_subnets and rss_id: try: - g = client.get_recovery_service_subnet(recovery_service_subnet_id=rss_id) + g = client.get_recovery_service_subnet( + recovery_service_subnet_id=rss_id + ) full = map_recovery_service_subnet(getattr(g, "data", None)) if full and getattr(full, "subnets", None): rss.subnets = full.subnets @@ -1357,9 +1412,13 @@ def list_recovery_service_subnets( raise -@mcp.tool(description=( - "Gets a recovery service subnet by OCID and makes sure the subnets list is present, using subnet_id if necessary. The result is one recovery service subnet." -)) +@mcp.tool( + description=( + "Gets a recovery service subnet by OCID and makes sure the subnets list is " + "present, using subnet_id if necessary. The result is one recovery service " + "subnet." + ) +) def get_recovery_service_subnet( recovery_service_subnet_id: Annotated[str, "Recovery Service Subnet OCID"], opc_request_id: Annotated[ @@ -1402,9 +1461,14 @@ def get_recovery_service_subnet( raise -@mcp.tool(description=( - "Fetches Recovery Service metrics for a time range. You choose the metric, time step, and how to combine values, and you can limit it to one protected database. The result is a simple time series where each item has dimensions and a list of {timestamp, value} points." -)) +@mcp.tool( + description=( + "Fetches Recovery Service metrics for a time range. You choose the metric, " + "time step, and how to combine values, and you can limit it to one protected " + "database. The result is a simple time series where each item has dimensions " + "and a list of {timestamp, value} points." + ) +) def get_recovery_service_metrics( compartment_id: str, start_time: str, @@ -1477,7 +1541,11 @@ def get_recovery_service_metrics( @mcp.tool( description=( - "Lists databases in a DB Home or, if none is given, across all DB Homes in a compartment. It can find DB Homes for you, fills in backup settings only when needed, and, where possible, links each database to its protection policy. The result is a list of database summaries with optional backup settings and protection policy ID." + "Lists databases in a DB Home or, if none is given, across all DB Homes in a " + "compartment. It can find DB Homes for you, fills in backup settings only when " + "needed, and, where possible, links each database to its protection policy. " + "The result is a list of database summaries with optional backup settings and " + "protection policy ID." ) ) def list_databases( @@ -1638,9 +1706,12 @@ def list_databases( raise -@mcp.tool(description=( - "Gets a database by OCID and returns an easy object. Where possible, it also links the database to its protection policy. The result is one database." -)) +@mcp.tool( + description=( + "Gets a database by OCID and returns an easy object. Where possible, it also " + "links the database to its protection policy. The result is one database." + ) +) def get_database( database_id: Annotated[str, "OCID of the Database to retrieve."], region: Annotated[ @@ -1706,7 +1777,12 @@ def get_database( @mcp.tool( description=( - "Lists database backups with flexible filters and optional auto-paging. If database_id is provided, lists all backups for that database. If compartment_id is provided, finds AVAILABLE databases with auto-backup enabled and lists their backups. It includes manual backups, automatic backups and LTR backups as well. 
It adds helpful fields like backup destination, database's unique name. The result is a list of easy-to-read backup summaries." + "Lists database backups with flexible filters and optional auto-paging. If " + "database_id is provided, lists all backups for that database. If compartment_id " + "is provided, finds AVAILABLE databases with auto-backup enabled and lists their " + "backups. It includes manual backups, automatic backups and LTR backups as well. " + "It adds helpful fields like backup destination, database's unique name. The " + "result is a list of easy-to-read backup summaries." ) ) def list_backups( @@ -1720,12 +1796,19 @@ def list_backups( type: Annotated[ Optional[str], "Backup type filter (e.g., INCREMENTAL, FULL)." ] = None, - limit: Annotated[Optional[int], "Maximum number of items per backend page (when aggregate_pages=false)."] = None, - page: Annotated[Optional[str], "Pagination token (opc-next-page) when aggregate_pages=false."] = None, + limit: Annotated[ + Optional[int], + "Maximum number of items per backend page (when aggregate_pages=false).", + ] = None, + page: Annotated[ + Optional[str], "Pagination token (opc-next-page) when aggregate_pages=false." + ] = None, region: Annotated[ Optional[str], "Canonical OCI region (e.g., us-ashburn-1)." ] = None, - aggregate_pages: Annotated[bool, "When true (default), retrieves all pages."] = True, + aggregate_pages: Annotated[ + bool, "When true (default), retrieves all pages." + ] = True, ) -> list[BackupSummary]: try: client = get_database_client(region) @@ -1742,13 +1825,25 @@ def _to_dict(o): def _is_auto_backup_enabled_from_dict(d: dict) -> bool: cfg = None - for k in ("dbBackupConfig","db_backup_config","backupConfig","backup_config","databaseBackupConfig","database_backup_config"): + for k in ( + "dbBackupConfig", + "db_backup_config", + "backupConfig", + "backup_config", + "databaseBackupConfig", + "database_backup_config", + ): v = d.get(k) if isinstance(v, dict): cfg = v break src = cfg if isinstance(cfg, dict) else d - for key in ("isAutoBackupEnabled","is_auto_backup_enabled","autoBackupEnabled","auto_backup_enabled"): + for key in ( + "isAutoBackupEnabled", + "is_auto_backup_enabled", + "autoBackupEnabled", + "auto_backup_enabled", + ): if key in src and src[key] is not None: return bool(src[key]) return False @@ -1799,19 +1894,30 @@ def _pick(d: dict, *keys: str): return None if out_dict.get("database-size-in-gbs") is None: - ds = _pick(rawd, "database_size_in_gbs", "databaseSizeInGBs", "databaseSizeInGbs") + ds = _pick( + rawd, + "database_size_in_gbs", + "databaseSizeInGBs", + "databaseSizeInGbs", + ) if ds is not None: out_dict["database-size-in-gbs"] = ds if out_dict.get("backup-destination-type") is None: - bdt = _pick(rawd, "backup_destination_type", "backupDestinationType") + bdt = _pick( + rawd, "backup_destination_type", "backupDestinationType" + ) if bdt is not None: out_dict["backup-destination-type"] = bdt if out_dict.get("retention-period-in-days") is None: - rpd = _pick(rawd, "retention_period_in_days", "retentionPeriodInDays") + rpd = _pick( + rawd, "retention_period_in_days", "retentionPeriodInDays" + ) if rpd is not None: out_dict["retention-period-in-days"] = rpd if out_dict.get("retention-period-in-years") is None: - rpy = _pick(rawd, "retention_period_in_years", "retentionPeriodInYears") + rpy = _pick( + rawd, "retention_period_in_years", "retentionPeriodInYears" + ) if rpy is not None: out_dict["retention-period-in-years"] = rpy @@ -1869,7 +1975,11 @@ def _pick(d: dict, *keys: str): for d 
in ditems: d_dict = _to_dict(d) dbid = d_dict.get("id") or getattr(d, "id", None) - dun = d_dict.get("dbUniqueName") or d_dict.get("db_unique_name") or getattr(d, "db_unique_name", None) + dun = ( + d_dict.get("dbUniqueName") + or d_dict.get("db_unique_name") + or getattr(d, "db_unique_name", None) + ) is_auto = _is_auto_backup_enabled_from_dict(d_dict) if is_auto is False and dbid: # fallback to GET for authoritative value and db_unique_name @@ -1878,7 +1988,9 @@ def _pick(d: dict, *keys: str): gdd = _to_dict(getattr(g, "data", None)) is_auto = _is_auto_backup_enabled_from_dict(gdd) if dun is None: - dun = gdd.get("dbUniqueName") or gdd.get("db_unique_name") + dun = gdd.get("dbUniqueName") or gdd.get( + "db_unique_name" + ) except Exception: is_auto = False if dbid: @@ -1887,7 +1999,9 @@ def _pick(d: dict, *keys: str): if is_auto: eligible_db_ids.append(dbid) has_next = bool(getattr(dresp, "has_next_page", False)) - next_db_page = getattr(dresp, "next_page", None) if has_next else None + next_db_page = ( + getattr(dresp, "next_page", None) if has_next else None + ) if not has_next: break # Aggregate backups for eligible DBs @@ -1909,9 +2023,13 @@ def _pick(d: dict, *keys: str): raise -@mcp.tool(description=( - "Gets a database backup by OCID and returns a clean dictionary. It includes common fields like database size, backup destination, and the database's unique name. The result is one backup with those helpful fields included." -)) +@mcp.tool( + description=( + "Gets a database backup by OCID and returns a clean dictionary. It includes " + "common fields like database size, backup destination, and the database's " + "unique name. The result is one backup with those helpful fields included." + ) +) def get_backup( backup_id: Annotated[str, "OCID of the Backup to retrieve."], region: Annotated[ @@ -1950,7 +2068,9 @@ def _pick(d: dict, *keys: str): return None if out.get("database-size-in-gbs") is None: - ds = _pick(rawd, "database_size_in_gbs", "databaseSizeInGBs", "databaseSizeInGbs") + ds = _pick( + rawd, "database_size_in_gbs", "databaseSizeInGBs", "databaseSizeInGbs" + ) if ds is not None: out["database-size-in-gbs"] = ds if out.get("backup-destination-type") is None: @@ -1971,20 +2091,49 @@ def _pick(d: dict, *keys: str): dbid = out.get("database_id") or rawd.get("databaseId") if (out.get("backup-destination-type") is None) and dbid: gdb = client.get_database(database_id=dbid) - gdd = oci.util.to_dict(getattr(gdb, "data", None)) if hasattr(oci, "util") and hasattr(oci.util, "to_dict") else (getattr(getattr(gdb, "data", None), "__dict__", {}) or {}) - cfg = gdd.get("dbBackupConfig") or gdd.get("db_backup_config") or gdd.get("databaseBackupConfig") + gdd = ( + oci.util.to_dict(getattr(gdb, "data", None)) + if hasattr(oci, "util") and hasattr(oci.util, "to_dict") + else (getattr(getattr(gdb, "data", None), "__dict__", {}) or {}) + ) + cfg = ( + gdd.get("dbBackupConfig") + or gdd.get("db_backup_config") + or gdd.get("databaseBackupConfig") + ) details = None if isinstance(cfg, dict): - details = cfg.get("backupDestinationDetails") or cfg.get("backup_destination_details") + details = cfg.get("backupDestinationDetails") or cfg.get( + "backup_destination_details" + ) if not details: - details = gdd.get("backupDestinationDetails") or gdd.get("backup_destination_details") - det_list = details if isinstance(details, list) else ([details] if details else []) + details = gdd.get("backupDestinationDetails") or gdd.get( + "backup_destination_details" + ) + det_list = ( + details + if 
isinstance(details, list) + else ([details] if details else []) + ) types = [] for det in det_list: - dd = det if isinstance(det, dict) else (oci.util.to_dict(det) if hasattr(oci, "util") and hasattr(oci.util, "to_dict") else det.__dict__ if hasattr(det, "__dict__") else {}) + dd = ( + det + if isinstance(det, dict) + else ( + oci.util.to_dict(det) + if hasattr(oci, "util") and hasattr(oci.util, "to_dict") + else det.__dict__ if hasattr(det, "__dict__") else {} + ) + ) t = (dd or {}).get("type") or (dd or {}).get("destinationType") - tnorm = (str(t).upper() if t else None) - if tnorm in ("RECOVERY_SERVICE", "RECOVERY-SERVICE", "DBRS", "RECOVERY_SERVICE_BACKUP_DESTINATION"): + tnorm = str(t).upper() if t else None + if tnorm in ( + "RECOVERY_SERVICE", + "RECOVERY-SERVICE", + "DBRS", + "RECOVERY_SERVICE_BACKUP_DESTINATION", + ): types.append("DBRS") elif tnorm in ("OBJECT_STORE", "OBJECTSTORE", "OBJECT_STORAGE"): types.append("OBJECT_STORE") @@ -2001,7 +2150,11 @@ def _pick(d: dict, *keys: str): # Ensure db_unique_name on model and output try: - dbid = out.get("database_id") or rawd.get("databaseId") or rawd.get("database_id") + dbid = ( + out.get("database_id") + or rawd.get("databaseId") + or rawd.get("database_id") + ) if dbid: try: gdb = client.get_database(database_id=dbid) @@ -2040,10 +2193,14 @@ def _pick(d: dict, *keys: str): @mcp.tool( description=( - "Summarizes how databases in a compartment or DB Home are backed up. It can find DB Homes, looks at each database’s backup settings, can include the time of the most recent backup, and groups results by destination type while calling out databases that aren’t configured. The result is one summary object with counts, name lists, and per‑database details." + "Summarizes how databases in a compartment or DB Home are backed up. It can " + "find DB Homes, looks at each database’s backup settings, can include the time " + "of the most recent backup, and groups results by destination type while calling " + "out databases that aren’t configured. The result is one summary object with " + "counts, name lists, and per‑database details." ) ) -def summarise_protected_database_backup_destination( +def summarize_protected_database_backup_destination( compartment_id: Annotated[ Optional[str], "OCID of the compartment. If omitted, defaults to the tenancy/DEFAULT profile.", @@ -2064,9 +2221,7 @@ def summarise_protected_database_backup_destination( limit_per_home: Annotated[ Optional[int], "Max databases to fetch per DB Home." ] = None, - max_db_homes: Annotated[ - Optional[int], "Max number of DB Homes to scan." - ] = None, + max_db_homes: Annotated[Optional[int], "Max number of DB Homes to scan."] = None, max_total_databases: Annotated[ Optional[int], "Global cap on databases to scan." 
] = None, @@ -2087,7 +2242,9 @@ def summarise_protected_database_backup_destination( list_dbs_method = getattr(db_client, "list_databases") db_summaries: list[Any] = [] if home_ids: - for hid in (home_ids[:max_db_homes] if (max_db_homes is not None) else home_ids): + for hid in ( + home_ids[:max_db_homes] if (max_db_homes is not None) else home_ids + ): call_kwargs = { "compartment_id": compartment_id, "db_home_id": hid, @@ -2108,7 +2265,10 @@ def summarise_protected_database_backup_destination( db_summaries.extend(data) elif data is not None: db_summaries.append(data) - if max_total_databases is not None and len(db_summaries) >= max_total_databases: + if ( + max_total_databases is not None + and len(db_summaries) >= max_total_databases + ): db_summaries = db_summaries[:max_total_databases] break has_next = bool(getattr(resp, "has_next_page", False)) @@ -2116,11 +2276,7 @@ def summarise_protected_database_backup_destination( if not has_next: break - # Build a map of database_id -> list of Protected Databases (from Recovery Service) - # This allows us to infer DBRS (Recovery Service) as a destination type even if DB backup config - # does not explicitly list it as a destination, by virtue of PD linkage. # Simplified: do not correlate via Recovery Protected Databases - pd_by_dbid: dict[str, list[dict]] = {} # Helper routines to normalize SDK objects and read fields across variants def _to_dict(o: Any) -> dict: @@ -2306,12 +2462,12 @@ def _read_backup_times_from_obj(o: Any) -> list[Any]: if did: dest_ids.append(did) - # Augment with Recovery Service protected database linkage - pds_for_db = pd_by_dbid.get(sid, []) - # Simplified: do not infer DBRS from Protected Database linkage - # Deduplicate and restrict to DBRS/OBJECT_STORE; prefer DBRS if both - dest_types = list(dict.fromkeys([t for t in dest_types if t in ("DBRS", "OBJECT_STORE")])) + dest_types = list( + dict.fromkeys( + [t for t in dest_types if t in ("DBRS", "OBJECT_STORE")] + ) + ) if "DBRS" in dest_types and "OBJECT_STORE" in dest_types: dest_types = ["DBRS"] dest_ids = list(dict.fromkeys([d for d in dest_ids if d])) @@ -2348,7 +2504,15 @@ def _read_backup_times_from_obj(o: Any) -> list[Any]: name_for_lists = db_name or sid if status == "CONFIGURED": # Select a single effective destination type: DBRS preferred over OBJECT_STORE - eff_type = "DBRS" if "DBRS" in dest_types else ("OBJECT_STORE" if "OBJECT_STORE" in dest_types else "UNKNOWN") + eff_type = ( + "DBRS" + if "DBRS" in dest_types + else ( + "OBJECT_STORE" + if "OBJECT_STORE" in dest_types + else "UNKNOWN" + ) + ) if eff_type in ("DBRS", "OBJECT_STORE"): counts_by_type[eff_type] = counts_by_type.get(eff_type, 0) + 1 db_names_by_type.setdefault(eff_type, []).append(name_for_lists) @@ -2411,13 +2575,16 @@ def _sorted_keep(xs: list[str]) -> list[str]: ) except Exception as e: logger.error( - f"Error in summarise_protected_database_backup_destination tool: {e}" + f"Error in summarize_protected_database_backup_destination tool: {e}" ) raise + @mcp.tool( description=( - "Lists database homes in a compartment with optional lifecycle filters, defaulting to your tenancy when no compartment is given, and handles paging for you. The result is a list of database home summaries." + "Lists database homes in a compartment with optional lifecycle filters, " + "defaulting to your tenancy when no compartment is given, and handles paging " + "for you. The result is a list of database home summaries." 
) ) def list_db_homes( @@ -2463,9 +2630,12 @@ def list_db_homes( raise -@mcp.tool(description=( - "Gets a database home by OCID and returns it as a simple object. The result is one database home." -)) +@mcp.tool( + description=( + "Gets a database home by OCID and returns it as a simple object. The result " + "is one database home." + ) +) def get_db_home( db_home_id: Annotated[str, "OCID of the DB Home to retrieve."], region: Annotated[ @@ -2483,7 +2653,9 @@ def get_db_home( @mcp.tool( description=( - "Lists database systems in a compartment with optional lifecycle filters, defaulting to your tenancy when no compartment is given, and handles paging for you. The result is a list of database system summaries." + "Lists database systems in a compartment with optional lifecycle filters, " + "defaulting to your tenancy when no compartment is given, and handles paging " + "for you. The result is a list of database system summaries." ) ) def list_db_systems( @@ -2526,9 +2698,12 @@ def list_db_systems( raise -@mcp.tool(description=( - "Gets a database system by OCID and returns it as a convenient object. The result is one database system." -)) +@mcp.tool( + description=( + "Gets a database system by OCID and returns it as a convenient object. The " + "result is one database system." + ) +) def get_db_system( db_system_id: Annotated[str, "OCID of the DB System to retrieve."], region: Annotated[ diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_database_tools.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_database_tools.py index f99e6a84..499d3296 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_database_tools.py +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_database_tools.py @@ -34,14 +34,17 @@ async def test_list_databases(self, mock_get_db_client): mock_client.get_database.return_value = get_resp async with Client(mcp) as client: - call = await client.call_tool("list_databases", {"db_home_id": "home1"}) - result = call.structured_content["result"] + call_tool_result = await client.call_tool( + "list_databases", + {"db_home_id": "home1"}, + ) + result = call_tool_result.structured_content["result"] - assert isinstance(result, list) - assert len(result) == 1 - assert result[0]["id"] == "db1" - # db_backup_config should be present after enrichment - assert "db_backup_config" in result[0] + assert isinstance(result, list) + assert len(result) == 1 + assert result[0]["id"] == "db1" + # db_backup_config should be present after enrichment + assert "db_backup_config" in result[0] @pytest.mark.asyncio @patch("oracle.oci_recovery_mcp_server.server.get_recovery_client") @@ -69,11 +72,15 @@ async def test_get_database_sets_protection_policy( rec_client.list_protected_databases.return_value = pd_list_resp async with Client(mcp) as client: - call = await client.call_tool("get_database", {"database_id": "db1"}) - result = call.structured_content - assert result["id"] == "db1" - # Enriched field from correlation loop - assert result.get("protection_policy_id") == "pp1" + call_tool_result = await client.call_tool( + "get_database", + {"database_id": "db1"}, + ) + result = call_tool_result.structured_content + + assert result["id"] == "db1" + # Enriched field from correlation loop + assert result.get("protection_policy_id") == "pp1" @pytest.mark.asyncio @patch("oracle.oci_recovery_mcp_server.server.get_database_client") @@ -86,11 +93,15 @@ async def 
test_list_backups(self, mock_get_db_client): mock_client.list_backups.return_value = list_resp async with Client(mcp) as client: - call = await client.call_tool("list_backups", {}) - result = call.structured_content["result"] - assert isinstance(result, list) - assert len(result) == 1 - assert result[0]["id"] == "b1" + call_tool_result = await client.call_tool( + "list_backups", + {"database_id": "db1"}, + ) + result = call_tool_result.structured_content["result"] + + assert isinstance(result, list) + assert len(result) == 1 + assert result[0]["id"] == "b1" @pytest.mark.asyncio @patch("oracle.oci_recovery_mcp_server.server.get_database_client") @@ -103,56 +114,10 @@ async def test_get_backup(self, mock_get_db_client): mock_client.get_backup.return_value = get_resp async with Client(mcp) as client: - call = await client.call_tool("get_backup", {"backup_id": "b1"}) - result = call.structured_content - assert result["id"] == "b1" - - @pytest.mark.asyncio - @patch("oracle.oci_recovery_mcp_server.server._fetch_db_home_ids_for_compartment") - @patch("oracle.oci_recovery_mcp_server.server.get_recovery_client") - @patch("oracle.oci_recovery_mcp_server.server.get_database_client") - async def test_summarize_protected_database_backup_destination_dbrs_configured( - self, mock_get_db_client, mock_get_rec_client, mock_fetch_homes - ): - mock_fetch_homes.return_value = ["home1"] - - db_client = MagicMock() - rec_client = MagicMock() - mock_get_db_client.return_value = db_client - mock_get_rec_client.return_value = rec_client - - # One database summary in the discovered DB Home - list_db_resp = create_autospec(oci.response.Response) - list_db_resp.data = SimpleNamespace(items=[{"id": "dbA", "db_name": "DBA"}]) - db_client.list_databases.return_value = list_db_resp - - # Full DB details (no backup config required to still detect DBRS) - get_db_resp = create_autospec(oci.response.Response) - get_db_resp.data = {"id": "dbA"} - db_client.get_database.return_value = get_db_resp - - # Minimal non-empty backup list for the quick "has backups" probe path - list_bk_resp = create_autospec(oci.response.Response) - list_bk_resp.data = SimpleNamespace(items=[{"id": "bk1"}]) - db_client.list_backups.return_value = list_bk_resp - - # Protected databases listing correlates dbA -> implies DBRS destination - pd_list_resp = create_autospec(oci.response.Response) - pd_list_resp.has_next_page = False - pd_list_resp.next_page = None - pd_list_resp.data = SimpleNamespace(items=[{"id": "pd1", "databaseId": "dbA"}]) - rec_client.list_protected_databases.return_value = pd_list_resp - - async with Client(mcp) as client: - call = await client.call_tool( - "summarize_protected_database_backup_destination", - {"compartment_id": "ocid1.compartment.oc1..comp"}, + call_tool_result = await client.call_tool( + "get_backup", + {"backup_id": "b1"}, ) - result = call.structured_content - - # Expect 1 total DB, configured under DBRS - assert result["total_databases"] == 1 - counts = result["counts_by_destination_type"] - assert isinstance(counts, dict) - # DBRS should be detected due to Protected Database correlation - assert counts.get("DBRS", 0) >= 1 + result = call_tool_result.structured_content + + assert result["id"] == "b1" diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_tools.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_tools.py index af242d87..7a70fdab 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_tools.py 
+++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/tests/test_recovery_tools.py @@ -280,16 +280,19 @@ async def test_summarize_backup_space_used(self, mock_get_client, mock_get_tenan mock_get_client.return_value = mock_client mock_list_response = create_autospec(oci.response.Response) - mock_list_response.data = [ - oci.recovery.models.ProtectedDatabaseSummary(id="pd1"), - oci.recovery.models.ProtectedDatabaseSummary(id="pd2"), - ] + pd1_summary = oci.recovery.models.ProtectedDatabaseSummary( + id="pd1", lifecycle_state="ACTIVE" + ) + pd2_summary = oci.recovery.models.ProtectedDatabaseSummary( + id="pd2", lifecycle_state="ACTIVE" + ) + mock_list_response.data = [pd1_summary, pd2_summary] mock_list_response.has_next_page = False mock_list_response.next_page = None mock_client.list_protected_databases.return_value = mock_list_response # Fallback path for metrics at summary level - mock_list_response.data[0].metrics = {"backup_space_used_in_gbs": 10.5} - mock_list_response.data[1].metrics = {"backup_space_used_in_gbs": 4.5} + pd1_summary.metrics = {"backup_space_used_in_gbs": 10.5} + pd2_summary.metrics = {"backup_space_used_in_gbs": 4.5} # PD1 metrics 10.5 GB, PD2 metrics 4.5 GB pd1 = oci.recovery.models.ProtectedDatabase(id="pd1") @@ -309,18 +312,21 @@ async def test_summarize_backup_space_used(self, mock_get_client, mock_get_tenan async with Client(mcp) as client: call_tool_result = await client.call_tool( "summarize_backup_space_used", - {"compartment_id": "ocid1.compartment.oc1..test"}, + { + "compartment_id": "ocid1.compartment.oc1..test", + "region": "us-ashburn-1", + }, ) result = call_tool_result.structured_content - total_scanned = result.get("total_databases_scanned") or result.get( - "totalDatabasesScanned" - ) - sum_gb = result.get("sum_backup_space_used_in_gbs") or result.get( - "sumBackupSpaceUsedInGBs" - ) - assert abs(sum_gb - 15.0) < 1e-9 - assert total_scanned is None or total_scanned >= 0 + total_scanned = result.get("total_databases_scanned") or result.get( + "totalDatabasesScanned" + ) + sum_gb = result.get("sum_backup_space_used_in_gbs") or result.get( + "sumBackupSpaceUsedInGBs" + ) + assert abs(sum_gb - 15.0) < 1e-9 + assert total_scanned == 2 @pytest.mark.asyncio @patch("oracle.oci_recovery_mcp_server.server.get_monitoring_client") diff --git a/src/oci-recovery-mcp-server/pyproject.toml b/src/oci-recovery-mcp-server/pyproject.toml index 9a2a5bf7..a4285b52 100644 --- a/src/oci-recovery-mcp-server/pyproject.toml +++ b/src/oci-recovery-mcp-server/pyproject.toml @@ -36,6 +36,7 @@ packages = ["oracle"] [dependency-groups] dev = [ + "black>=26.1.0", "pytest>=8.4.2", "pytest-asyncio>=1.2.0", ] From 861e1c4138c5f6dbee4c128ef515caeefea6d742 Mon Sep 17 00:00:00 2001 From: hagavisi Date: Fri, 6 Feb 2026 11:48:11 +0530 Subject: [PATCH 10/11] Fix descriptions --- .../oracle/oci_recovery_mcp_server/models.py | 12 ----------- .../oracle/oci_recovery_mcp_server/server.py | 20 +++++++++++++++---- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py index 17c13792..b96c9745 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/models.py @@ -1864,9 +1864,6 @@ class BackupSummary(OCIBaseModel): type: Optional[str] = Field(None, description="Backup type.") time_started: Optional[datetime] = Field(None, description="Start time 
(RFC3339).") time_ended: Optional[datetime] = Field(None, description="End time (RFC3339).") - time_created: Optional[datetime] = Field( - None, description="Creation time (RFC3339)." - ) retention_period_in_days: Optional[float] = Field( None, alias="retention-period-in-days", @@ -1920,9 +1917,6 @@ def map_backup_summary(b) -> BackupSummary | None: time_ended=getattr(b, "time_ended", None) or data.get("time_ended") or data.get("timeEnded"), - time_created=getattr(b, "time_created", None) - or data.get("time_created") - or data.get("timeCreated"), database_size_in_gbs=getattr(b, "database_size_in_gbs", None) or data.get("database_size_in_gbs") or data.get("databaseSizeInGBs") @@ -1952,9 +1946,6 @@ class Backup(OCIBaseModel): type: Optional[str] = Field(None, description="Backup type.") time_started: Optional[datetime] = Field(None, description="Start time (RFC3339).") time_ended: Optional[datetime] = Field(None, description="End time (RFC3339).") - time_created: Optional[datetime] = Field( - None, description="Creation time (RFC3339)." - ) database_version: Optional[str] = Field( None, description="Database version at backup time." ) @@ -2012,9 +2003,6 @@ def map_backup(b) -> Backup | None: time_ended=getattr(b, "time_ended", None) or data.get("time_ended") or data.get("timeEnded"), - time_created=getattr(b, "time_created", None) - or data.get("time_created") - or data.get("timeCreated"), database_size_in_gbs=getattr(b, "database_size_in_gbs", None) or data.get("database_size_in_gbs") or data.get("databaseSizeInGBs") diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py index f1b0ea88..55583f8c 100644 --- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py @@ -357,7 +357,13 @@ def get_compartment_by_name(compartment_name: str): "the match as JSON or a clear error if none is found." ) ) -def get_compartment_by_name_tool(name: str) -> str: +def get_compartment_by_name_tool( + name: Annotated[ + str, + "Compartment display name to search for (case-insensitive). Searches all " + "accessible ACTIVE compartments in the tenancy, including the root tenancy." + ] +) -> str: """Return a compartment matching the provided name""" compartment = get_compartment_by_name(name) if compartment: @@ -1470,9 +1476,15 @@ def get_recovery_service_subnet( ) ) def get_recovery_service_metrics( - compartment_id: str, - start_time: str, - end_time: str, + compartment_id: Annotated[str, "The OCID of the compartment to query metrics for."], + start_time: Annotated[ + str, + "Start time for the metric query. Provide a RFC3339/ISO-8601 timestamp." + ], + end_time: Annotated[ + str, + "End time for the metric query. Provide a RFC3339/ISO-8601 timestamp." + ], metricName: Annotated[ str, "The metric that the user wants to fetch. 
Currently we only support:" From 1a7077f9f27c15760c0adbd0e7a636b1f3bf33ff Mon Sep 17 00:00:00 2001 From: hagavisi Date: Tue, 10 Feb 2026 11:28:39 +0530 Subject: [PATCH 11/11] Add OCI Recovery Service Dashboard Prompt --- .../oci_recovery_service_dashboard.txt | 148 +++++++++++++++++ .../oracle/oci_recovery_mcp_server/server.py | 153 ++++++++++++++++++ 2 files changed, 301 insertions(+) create mode 100644 src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/data/prompts/oci_recovery_service_dashboard.txt diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/data/prompts/oci_recovery_service_dashboard.txt b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/data/prompts/oci_recovery_service_dashboard.txt new file mode 100644 index 00000000..495d5136 --- /dev/null +++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/data/prompts/oci_recovery_service_dashboard.txt @@ -0,0 +1,148 @@ + + +You are an expert dashboard generator. You very well know how to generate a presentable charts for the executives . + Make sure the chart is loadable and there are no errors while loading chart. + + Visualise OCI Recovery - Dashboard charts in one html document with below metrics in the given compartment. +Display the Title - OCI Recovery - Dashboard +Under the Title, mention the compartment name. +Under this compartment name add a note : Generated using Recovery Service MCP Server +Add the date of report generation + +Main page ( Overview) + In first row, summarise base db systems based on backup destination - DBRS, OBJECT_STORE or UNCONFIGURED using donut chart - using tool summarise_protected_database_backup_destination . Use title - Databases categorised by backup destination. Replace DBRS with "RecoveryService” while generating the report. + In another frame, first row, second column, With title Protected Database Space : report total backup space used by protected databases - using tool summarize_backup_space_used. Note : Space used by ACTIVE/DELETE SCHEDULED protected databases + In Second Row first column, Summarise protected database based on lifecycle status using donut chart - Title - protected databases by lifecycle state. Make sure the values are based on actual data. Double check this + In Second row, summarise protected databases on health status - PROTECTED, WARNING, ALERT using donut chart - using tool summarize_protected_database_health - Title - ACTIVE protected databases by health state + In Second row, summarise protected databases on realtime redo status - ENABLED, DISABLED using donut chart - using tool summarize_protected_database_redo_status -- Title - ACTIVE protected databases by real time redo +In the Fourth row, Report the protected databases with OCID, database db unique name, health status, lifecycle state, redo status, backup space used  in tabular format . Filterable columns - Health status, Redo status , life cycle state . This data is very important. extract details very carefully from list protecetd datbase output and carefully map the columns to the OCIDs. Be very diligent while filling up this table. Dont miss the filters. + + +In another tab named - Backup Details +Tool to use list backups with compartment id as argument +You are an expert dashboard generator. You very well know how to generate a presentable charts for the executives . +Make sure the chart is loadable and there are no errors while loading chart. The lines in graph are clearly visible and well positioned. 
+
+Generate a line chart showing backup creation timelines for databases in the mentioned compartment. Each database is represented by a distinct line, with points marking individual backup events based on 'time_started'. Style the chart for executive presentation, with a clean layout, a legend, and tooltips.
+X axis - creation date / start date
+Y axis - db_unique_name
+Generate a line chart showing the time taken by each backup for databases in the mentioned compartment. Each database is represented by a distinct line, with points marking individual backup events based on 'duration'. If the user hovers over an individual point, they should get details such as the exact duration and the type of backup (FULL or INCREMENTAL).
+ - X axis: creation date / start date
+ - Y axis: duration_minutes.
+Give your insight on:
+Whether any backup took more or less time than the usual pattern.
+Whether any backup is missing from the pattern.
+If a backup was taken manually or is an LTR backup, call that out separately.
+
+IMPORTANT:
+Make sure charts load and render clearly in HTML. Double-check this condition.
+There should be no missing line charts.
+Do not add date/time labels on the points unless the user hovers over that point.
+
+
+In another tab named - Backup space usage
+Tool to use: get_recovery_service_metrics, with a resolution of 1 day.
+Generate a line chart of the backup space used by each protected database in the last 5 days, using the tool get_recovery_service_metrics with metricName SpaceUsedForRecoveryWindow. Each protected database is represented by a distinct line, with points marking individual space measurements.
+Give your insight on:
+Whether there are any anomalies in the space usage pattern.
+Any other space anomaly you can think of.
+
+Add an executive KPI summary row directly below the dashboard title.
+
+Create 4 KPI cards displayed horizontally using the Bootstrap grid:
+
+1. Total Protected Databases
+2. Healthy Databases (%)
+3. Redo Shipping Enabled (%)
+4. Total Backup Space Used (GB)
+
+Rules:
+
+- Use existing dashboard data to calculate KPI values.
+- KPI cards must be visually compact and of equal height.
+- Each KPI card should contain:
+  - Small uppercase label
+  - Large bold metric value
+  - Optional subtext (e.g. "of 3 databases")
+
+Styling:
+
+- Use soft card backgrounds with rounded corners.
+- Center-align KPI content.
+- Use large font (2–2.5rem) for KPI numbers.
+- Use subtle shadows.
+- Maintain spacing with Bootstrap g-4.
+
+Layout:
+
+- KPI row must appear ABOVE all charts.
+- Dashboard width remains 75vw.
+- On mobile, stack KPI cards vertically.
+
+Data logic:
+
+- Total Protected Databases = count of protected databases.
+- Healthy % = (PROTECTED / total) * 100.
+- Redo Enabled % = (ENABLED / total) * 100.
+- Total Backup Space = sum of space used.
+
+After KPIs, keep charts as secondary visual detail.
+
+
+IMPORTANT
+
+Layout rules for the HTML dashboard:
+
+- Wrap all content in a .dashboard-container:
+    width: 75vw;
+    max-width: 1200px;
+    margin: 0 auto;
+- Center the dashboard horizontally.
+- Do NOT use full screen width.
+- On mobile (<768px), expand dashboard to 95vw.
+
+Chart container sizing:
+
+- Do NOT use a single default height for all charts.
+- Overview donut charts MUST be compact:
+    height: 260px.
+- Timeline / line charts:
+    height: 360px.
+- Space usage charts:
+    height: 300px.
+- Apply CSS classes per chart type (overview-donut, timeline-chart, space-chart).
+- Ensure donuts appear compact and not vertically stretched.
+
+When using Chart.js:
+
+- NEVER use custom HTML legends.
+- NEVER use absolute positioning over the canvas.
+- Always use the native Chart.js legend positioned on the right.
+- Add layout.padding = 20 to every chart.
+- For doughnut charts:
+    cutout: '65%'
+    radius: '85%'
+- Ensure charts fit inside containers without clipping.
+- Avoid overlapping elements.
+- Maintain clean spacing between panels.
+- Optimize layout for presentation-quality visuals.
+Make sure chart sizes are equal and fit well inside their demarcated areas.
+Make sure there are no rendering issues.
+Do not generate a truncated HTML file.
+Make sure it is a complete file without any syntax issues.
+Make sure legends are placed on the right side of each donut chart and within the frame.
+Show all the legends, and next to each legend entry mention the count in brackets.
+Make sure titles are at the top of each donut chart.
+Make sure the title, donut chart, and legends fit within the frame.
+
+Always render charts for every tab.
+Include the required JS adapters.
+Initialize charts after tab activation.
+
+
+Use soft, executive-appealing colors in the charts.
+Demarcate between the rows. Since this is a dashboard, make sure the user can see most charts without scrolling.
+Create a responsive layout that minimizes scrolling.
+
+
+
diff --git a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py
index 55583f8c..4aef1884 100644
--- a/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py
+++ b/src/oci-recovery-mcp-server/oracle/oci_recovery_mcp_server/server.py
@@ -100,6 +100,149 @@
 - get_db_system
 """
 
+OCI_RECOVERY_SERVICE_DASHBOARD_PROMPT = """You are an expert dashboard generator. You know how to generate presentable charts for executives.
+Make sure every chart is loadable and renders without errors.
+
+Visualise the OCI Recovery - Dashboard charts in one HTML document with the metrics below for the given compartment.
+Display the title - OCI Recovery - Dashboard.
+Under the title, mention the compartment name.
+Under the compartment name, add a note: Generated using Recovery Service MCP Server.
+Add the date of report generation.
+
+Main page (Overview)
+In the first row, summarise base DB systems by backup destination - DBRS, OBJECT_STORE, or UNCONFIGURED - using a donut chart and the tool summarise_protected_database_backup_destination. Use the title - Databases categorised by backup destination. Replace DBRS with "RecoveryService" while generating the report.
+In another frame, first row, second column, with the title Protected Database Space: report the total backup space used by protected databases, using the tool summarize_backup_space_used. Note: this is the space used by ACTIVE/DELETE SCHEDULED protected databases.
+In the second row, first column, summarise protected databases by lifecycle state using a donut chart - title - Protected databases by lifecycle state. Make sure the values are based on actual data. 
Double-check this.
+In the second row, summarise protected databases by health status - PROTECTED, WARNING, or ALERT - using a donut chart and the tool summarize_protected_database_health - title - ACTIVE protected databases by health state.
+In the second row, summarise protected databases by real-time redo status - ENABLED or DISABLED - using a donut chart and the tool summarize_protected_database_redo_status - title - ACTIVE protected databases by real time redo.
+In the fourth row, report the protected databases with OCID, database db_unique_name, health status, lifecycle state, redo status, and backup space used, in tabular format. Filterable columns - health status, redo status, lifecycle state. This data is very important: extract the details carefully from the list protected databases output and map each column to the correct OCID. Be diligent while filling in this table. Do not miss the filters.
+
+
+In another tab named - Backup Details
+Tool to use: list backups, with the compartment ID as the argument.
+You are an expert dashboard generator. You know how to generate presentable charts for executives.
+Make sure every chart is loadable and renders without errors. The lines in each graph must be clearly visible and well positioned.
+
+Generate a line chart showing backup creation timelines for databases in the mentioned compartment. Each database is represented by a distinct line, with points marking individual backup events based on 'time_started'. Style the chart for executive presentation, with a clean layout, a legend, and tooltips.
+X axis - creation date / start date
+Y axis - db_unique_name
+Generate a line chart showing the time taken by each backup for databases in the mentioned compartment. Each database is represented by a distinct line, with points marking individual backup events based on 'duration'. If the user hovers over an individual point, they should get details such as the exact duration and the type of backup (FULL or INCREMENTAL).
+ - X axis: creation date / start date
+ - Y axis: duration_minutes.
+Give your insight on:
+Whether any backup took more or less time than the usual pattern.
+Whether any backup is missing from the pattern.
+If a backup was taken manually or is an LTR backup, call that out separately.
+
+IMPORTANT:
+Make sure charts load and render clearly in HTML. Double-check this condition.
+There should be no missing line charts.
+Do not add date/time labels on the points unless the user hovers over that point.
+
+
+In another tab named - Backup space usage
+Tool to use: get_recovery_service_metrics, with a resolution of 1 day.
+Generate a line chart of the backup space used by each protected database in the last 5 days, using the tool get_recovery_service_metrics with metricName SpaceUsedForRecoveryWindow. Each protected database is represented by a distinct line, with points marking individual space measurements.
+Give your insight on:
+Whether there are any anomalies in the space usage pattern.
+Any other space anomaly you can think of.
+
+Add an executive KPI summary row directly below the dashboard title.
+
+Create 4 KPI cards displayed horizontally using the Bootstrap grid:
+
+1. Total Protected Databases
+2. Healthy Databases (%)
+3. Redo Shipping Enabled (%)
+4. Total Backup Space Used (GB)
+
+Rules:
+
+- Use existing dashboard data to calculate KPI values.
+- KPI cards must be visually compact and of equal height.
+- Each KPI card should contain:
+  - Small uppercase label
+  - Large bold metric value
+  - Optional subtext (e.g. "of 3 databases")
+
+Styling:
+
+- Use soft card backgrounds with rounded corners.
+- Center-align KPI content.
+- Use large font (2–2.5rem) for KPI numbers.
+- Use subtle shadows.
+- Maintain spacing with Bootstrap g-4.
+
+Layout:
+
+- KPI row must appear ABOVE all charts.
+- Dashboard width remains 75vw.
+- On mobile, stack KPI cards vertically.
+
+Data logic:
+
+- Total Protected Databases = count of protected databases.
+- Healthy % = (PROTECTED / total) * 100.
+- Redo Enabled % = (ENABLED / total) * 100.
+- Total Backup Space = sum of space used.
+
+After KPIs, keep charts as secondary visual detail.
+
+
+IMPORTANT
+
+Layout rules for the HTML dashboard:
+
+- Wrap all content in a .dashboard-container:
+    width: 75vw;
+    max-width: 1200px;
+    margin: 0 auto;
+- Center the dashboard horizontally.
+- Do NOT use full screen width.
+- On mobile (<768px), expand dashboard to 95vw.
+
+Chart container sizing:
+
+- Do NOT use a single default height for all charts.
+- Overview donut charts MUST be compact:
+    height: 260px.
+- Timeline / line charts:
+    height: 360px.
+- Space usage charts:
+    height: 300px.
+- Apply CSS classes per chart type (overview-donut, timeline-chart, space-chart).
+- Ensure donuts appear compact and not vertically stretched.
+
+When using Chart.js:
+
+- NEVER use custom HTML legends.
+- NEVER use absolute positioning over the canvas.
+- Always use the native Chart.js legend positioned on the right.
+- Add layout.padding = 20 to every chart.
+- For doughnut charts:
+    cutout: '65%'
+    radius: '85%'
+- Ensure charts fit inside containers without clipping.
+- Avoid overlapping elements.
+- Maintain clean spacing between panels.
+- Optimize layout for presentation-quality visuals.
+Make sure chart sizes are equal and fit well inside their demarcated areas.
+Make sure there are no rendering issues.
+Do not generate a truncated HTML file.
+Make sure it is a complete file without any syntax issues.
+Make sure legends are placed on the right side of each donut chart and within the frame.
+Show all the legends, and next to each legend entry mention the count in brackets.
+Make sure titles are at the top of each donut chart.
+Make sure the title, donut chart, and legends fit within the frame.
+
+Always render charts for every tab.
+Include the required JS adapters.
+Initialize charts after tab activation.
+
+Use soft, executive-appealing colors in the charts.
+Demarcate between the rows. Since this is a dashboard, make sure the user can see most charts without scrolling.
+Create a responsive layout that minimizes scrolling."""

 # Logging setup
 def setup_logging():
@@ -2731,6 +2874,16 @@ def get_db_system(
         raise
 
 
+@mcp.prompt(
+    name="oci_recovery_service_dashboard_prompt",
+    description="Returns the OCI Recovery Service Dashboard prompt."
+)
+def oci_recovery_service_dashboard_prompt():
+    return [{
+        "role": "system",
+        "content": OCI_RECOVERY_SERVICE_DASHBOARD_PROMPT
+    }]
+
 def main():
     # Entrypoint: choose transport based on env; always log startup meta and log file location
     host = os.getenv("ORACLE_MCP_HOST")
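Note: after this patch the dashboard prompt text lives in two places - the packaged data file under data/prompts/ and the inline OCI_RECOVERY_SERVICE_DASHBOARD_PROMPT constant in server.py - and the two copies will drift unless both are updated together. Below is a minimal sketch of deriving the constant from the packaged file instead. It is an illustration only, not part of the patch, and it assumes the data/prompts directory actually ships inside the built package (the packaging configuration for that is not shown here):

```
from importlib import resources

# Sketch only (not part of this patch): resolve the prompt file relative to
# the installed package. The "/" operator on the Traversable returned by
# files() walks into subdirectories, so this works from a source checkout or
# from an installed wheel, provided data/prompts is included in the build.
_PROMPT_RESOURCE = (
    resources.files("oracle.oci_recovery_mcp_server")
    / "data" / "prompts" / "oci_recovery_service_dashboard.txt"
)

OCI_RECOVERY_SERVICE_DASHBOARD_PROMPT = _PROMPT_RESOURCE.read_text(encoding="utf-8")
```

The trade-off: the inline constant avoids a filesystem read at import time, while reading from the data file keeps the prompt editable without touching Python code. Either way, a single source of truth prevents the two copies from diverging.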