diff --git a/.claude/skills/netgraph-dev-workflow/SKILL.md b/.claude/skills/netgraph-dev-workflow/SKILL.md
new file mode 100644
index 0000000..7030757
--- /dev/null
+++ b/.claude/skills/netgraph-dev-workflow/SKILL.md
@@ -0,0 +1,228 @@
+---
+name: netgraph-dev-workflow
+description: >
+  NetGraph development workflow and CLI reference. Use when: setting up dev environment,
+  running tests or lint, fixing CI failures, using the ngraph CLI (inspect, run commands),
+  troubleshooting venv issues, or asking about make targets.
+---
+
+# NetGraph Development Workflow
+
+## Command Discovery
+
+Run `make help` to see all available targets with descriptions. The Makefile is the authoritative source.
+
+## Initial Setup
+
+```bash
+make dev  # Creates venv, installs package + dev deps, sets up pre-commit hooks
+```
+
+Requires Python 3.11+. The setup auto-detects the best available Python version.
+
+## Workflow Patterns
+
+### Iterating on Code
+
+| Scenario | Command | Notes |
+|----------|---------|-------|
+| Quick feedback loop | `make qt` | Fast: skips slow tests, no coverage |
+| Before committing | `make check` | **Mandatory.** Runs pre-commit + tests + lint |
+| Full test suite | `make test` | Includes slow tests, generates coverage |
+| After changing APIs | `make docs` | **Mandatory.** Regenerates API documentation |
+
+### When to Run `make docs`
+
+Run `make docs` when modifying:
+
+- Public function/class signatures
+- Docstrings
+- Module-level documentation
+- Adding new public modules
+
+The generated `docs/reference/api-full.md` should be committed with your changes.
+
+### Fixing Lint Failures
+
+When `make lint` fails:
+
+1. Run `make format` first (auto-fixes formatting issues)
+2. Run `make lint` again to check remaining issues
+3. Fix any type errors or code quality issues manually
+
+### Pre-commit vs CI
+
+| Command | Behavior | Use case |
+|---------|----------|----------|
+| `make check` | Auto-fixes via pre-commit, then tests + lint | Local development |
+| `make check-ci` | Read-only lint + validate + test (no mutations) | CI pipelines |
+
+### Validating Scenarios
+
+```bash
+make validate  # Validates all YAML files in scenarios/ and tests/integration/
+```
+
+## CLI Reference
+
+The `ngraph` CLI runs and inspects network scenarios. Use `ngraph --help` or `ngraph <command> --help` for full option details.
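+
+For example, to discover options before running anything (a minimal sketch; it assumes the usual per-command `--help` behavior of the CLI):
+
+```bash
+./venv/bin/ngraph --help          # Global options and available commands
+./venv/bin/ngraph inspect --help  # Options for a specific command
+```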
+
+### Commands
+
+| Command | Purpose |
+|---------|---------|
+| `ngraph inspect <scenario>` | Validate scenario and show structure summary |
+| `ngraph run <scenario>` | Execute workflow steps and export results |
+
+### Common Usage Patterns
+
+**Validate before running:**
+
+```bash
+./venv/bin/ngraph inspect scenario.yaml
+./venv/bin/ngraph run scenario.yaml
+```
+
+**Detailed inspection (node/link tables, step parameters):**
+
+```bash
+./venv/bin/ngraph inspect --detail scenario.yaml
+```
+
+**Run with profiling (CPU analysis, bottleneck detection):**
+
+```bash
+./venv/bin/ngraph run --profile scenario.yaml
+./venv/bin/ngraph run --profile --profile-memory scenario.yaml  # Include memory tracking
+```
+
+**Control output:**
+
+```bash
+./venv/bin/ngraph run scenario.yaml                        # Default: writes <scenario>.results.json
+./venv/bin/ngraph run scenario.yaml --results out.json     # Custom results path
+./venv/bin/ngraph run scenario.yaml --output ./results/    # All artifacts to directory
+./venv/bin/ngraph run scenario.yaml --no-results --stdout  # Print to stdout only
+./venv/bin/ngraph run scenario.yaml --keys msd placement   # Filter to specific workflow steps
+```
+
+**Debug logging:**
+
+```bash
+./venv/bin/ngraph -v inspect scenario.yaml   # Verbose (debug level)
+./venv/bin/ngraph --quiet run scenario.yaml  # Suppress info, show warnings only
+```
+
+### Key Options
+
+**Global:**
+
+- `-v, --verbose` - Enable debug logging
+- `--quiet` - Suppress console output (warnings only)
+
+**inspect:**
+
+- `-d, --detail` - Show complete node/link tables and step parameters
+- `-o, --output DIR` - Output directory for artifacts
+
+**run:**
+
+- `-r, --results FILE` - Custom results JSON path
+- `--no-results` - Skip writing results file
+- `--stdout` - Print results to stdout
+- `-k, --keys STEP [STEP...]` - Filter output to specific workflow step names
+- `--profile` - Enable CPU profiling with analysis
+- `--profile-memory` - Add memory tracking (requires `--profile`)
+- `-o, --output DIR` - Output directory for results and profiles
+
+### Output Interpretation
+
+**inspect output sections:**
+
+1. **OVERVIEW** - Quick metrics: nodes, links, capacity, demand, utilization
+2. **NETWORK STRUCTURE** - Hierarchy tree, capacity statistics, validation warnings
+3. **RISK GROUPS** - Failure correlation groups defined
+4. **COMPONENTS LIBRARY** - Hardware definitions for cost/power modeling
+5. **FAILURE POLICIES** - Failure simulation rules and modes
+6. **DEMAND SETS** - Traffic demands with pattern matching summary
+7. **WORKFLOW STEPS** - Execution plan with node selection preview
+
+**run output:**
+
+- Results JSON contains `steps.<step_name>.data` for each workflow step
+- With `--profile`: performance report shows timing breakdown and bottlenecks
+
+## Running Commands
+
+### Prefer Direct Venv Paths
+
+Use direct paths instead of `source venv/bin/activate`:
+
+```bash
+./venv/bin/python -m pytest tests/dsl/   # Run specific tests
+./venv/bin/ngraph inspect scenario.yaml  # Validate a scenario
+./venv/bin/ngraph run scenario.yaml      # Execute a scenario
+```
+
+This avoids shell state issues that agents may not maintain between commands.
+
+### Make Targets (Alternative)
+
+Make targets auto-detect the venv and work without activation:
+
+```bash
+make qt    # Quick tests
+make lint  # Linting checks
+```
+
+## Troubleshooting
+
+### Venv Missing or Broken
+
+**Symptom**: Commands fail with "No module named..." 
or venv/bin/python not found + +**Fix**: + +```bash +make dev # Recreates venv if missing, installs all deps +``` + +Or to fully reset: + +```bash +make clean-venv && make dev +``` + +### Python Version Mismatch + +**Symptom**: `make dev` warns about venv Python version != best available + +**Fix**: Recreate venv with latest Python: + +```bash +make clean-venv && make dev +``` + +### Import Errors After Pulling Changes + +**Symptom**: New imports fail after git pull + +**Fix**: Reinstall the package: + +```bash +./venv/bin/pip install -e '.[dev]' +``` + +Or run `make dev` which handles this. + +### Pre-commit Hook Failures + +**Symptom**: Commit blocked by pre-commit hooks + +**Fix**: + +1. Run `make format` to auto-fix formatting +2. Run `make check` to see remaining issues +3. Fix manually and commit again + +### Tests Pass Locally but Fail in CI + +**Symptom**: `make qt` passes but CI fails + +**Cause**: `make qt` skips slow tests; CI runs full suite + +**Fix**: Run `make test` locally to match CI behavior. diff --git a/.claude/skills/netgraph-dsl/SKILL.md b/.claude/skills/netgraph-dsl/SKILL.md index 28adf21..f84c19a 100644 --- a/.claude/skills/netgraph-dsl/SKILL.md +++ b/.claude/skills/netgraph-dsl/SKILL.md @@ -3,14 +3,9 @@ name: netgraph-dsl description: > NetGraph scenario DSL for defining network topologies, traffic demands, failure policies, and analysis workflows in YAML. Use when: creating or editing .yaml/.yml network scenarios, - defining nodes/links/groups, writing adjacency rules, configuring selectors or blueprints, - setting up traffic matrices or failure policies, debugging DSL syntax or validation errors, + defining nodes/links/groups, writing link rules with patterns, configuring selectors or blueprints, + setting up traffic demands or failure policies, debugging DSL syntax or validation errors, or asking about NetGraph scenario structure. -license: MIT -metadata: - author: "netgraph" - version: "1.1" - repo: "https://github.com/networmix/NetGraph" --- # NetGraph DSL @@ -25,7 +20,7 @@ Define network simulation scenarios in YAML format. When working with NetGraph scenarios: 1. **Creating new scenarios**: Start with the [Minimal Example](#minimal-example), then add sections as needed -2. **Editing existing scenarios**: Identify the relevant section (network, traffic_matrix_set, failure_policy_set, etc.) +2. **Editing existing scenarios**: Identify the relevant section (network, demands, failures, etc.) 3. **Understanding selection**: Review [Selection Models](#selection-models) to understand path-based vs condition-based selection 4. **Debugging issues**: Check [Common Pitfalls](#common-pitfalls) and [Validation Checklist](#validation-checklist) 5. **Complex topologies**: Use [Blueprints](#blueprints) for reusable patterns @@ -37,13 +32,13 @@ Refer to specific sections below for detailed syntax and examples. 
| Section | Purpose | |---------|---------| -| `network` | Topology: nodes, links, groups, adjacency (required) | +| `network` | Topology: nodes, links (required) | | `blueprints` | Reusable topology templates | | `components` | Hardware library for cost/power modeling | | `risk_groups` | Failure correlation groups | | `vars` | YAML anchors for value reuse | -| `traffic_matrix_set` | Traffic demand definitions | -| `failure_policy_set` | Failure simulation rules | +| `demands` | Traffic demand definitions | +| `failures` | Failure simulation rules | | `workflow` | Analysis execution steps | | `seed` | Master seed for reproducibility | @@ -57,9 +52,8 @@ network: links: - source: A target: B - link_params: - capacity: 100 - cost: 1 + capacity: 100 + cost: 1 ``` ## Core Patterns @@ -68,7 +62,7 @@ network: The DSL implements two distinct selection patterns: -**1. Path-based Node Selection** (adjacency rules, traffic demands, workflow steps) +**1. Path-based Node Selection** (link rules, traffic demands, workflow steps) - Uses regex patterns on hierarchical node names - Supports capture group-based grouping @@ -78,9 +72,9 @@ The DSL implements two distinct selection patterns: **2. Condition-based Entity Selection** (failure rules, membership rules, risk group generation) -- Works on nodes, links, or risk_groups (`entity_scope`) -- Uses only attribute-based filtering (`conditions`) -- No path/regex patterns (operates on all entities of specified type) +- Works on nodes/links; failure + membership can also target risk_groups +- Optional regex `path` filter on entity names/IDs (no capture grouping) +- Attribute filtering via `match.conditions` (`match.logic` defaults vary by context) These patterns share common primitives (condition evaluation, match specification) but serve different purposes and should not be confused. @@ -103,35 +97,34 @@ network: links: - source: Seattle target: Portland - link_params: # Required wrapper for link parameters - capacity: 100 - cost: 10 - attrs: - distance_km: 280 - link_count: 2 # Parallel links + capacity: 100 # Direct properties (no wrapper) + cost: 10 + attrs: + distance_km: 280 + count: 2 # Parallel links ``` ### Node Groups ```yaml network: - groups: + nodes: leaf: - node_count: 4 - name_template: "leaf-{node_num}" + count: 4 + template: "leaf{n}" attrs: role: leaf ``` -Creates: `leaf/leaf-1`, `leaf/leaf-2`, `leaf/leaf-3`, `leaf/leaf-4` +Creates: `leaf/leaf1`, `leaf/leaf2`, `leaf/leaf3`, `leaf/leaf4` ### Template Syntaxes | Syntax | Example | Context | |--------|---------|---------| | `[1-3]` | `dc[1-3]/rack` | Group names, risk groups | -| `$var`/`${var}` | `pod${p}/leaf` | Adjacency & demand selectors | -| `{node_num}` | `srv-{node_num}` | `name_template` field | +| `$var`/`${var}` | `pod${p}/leaf` | Link & demand selectors | +| `{n}` | `srv{n}` | `template` field | These are NOT interchangeable. See [REFERENCE.md](references/REFERENCE.md) for details. @@ -139,26 +132,25 @@ These are NOT interchangeable. See [REFERENCE.md](references/REFERENCE.md) for d ```yaml network: - groups: + nodes: dc[1-3]/rack[a,b]: # Cartesian product - node_count: 4 - name_template: "srv-{node_num}" + count: 4 + template: "srv{n}" ``` Creates: `dc1/racka`, `dc1/rackb`, `dc2/racka`, `dc2/rackb`, `dc3/racka`, `dc3/rackb` **Scope**: Bracket expansion works in group names, risk group definitions (including children), and risk group membership arrays. Component names and other fields treat brackets as literal characters. 
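+
+For instance, the same notation can assign one node to several risk groups at once (a minimal sketch; the group and node names are illustrative):
+
+```yaml
+risk_groups:
+  - name: Rack[1-2]               # Defines Rack1 and Rack2
+
+network:
+  nodes:
+    srv1:
+      risk_groups: ["Rack[1-2]"]  # Expands to membership in Rack1 and Rack2
+```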
-### Adjacency Patterns +### Link Patterns ```yaml network: - adjacency: + links: - source: /leaf target: /spine pattern: mesh # Every source to every target - link_params: - capacity: 100 + capacity: 100 - source: /group_a # 4 nodes target: /group_b # 2 nodes @@ -168,26 +160,27 @@ network: ### Selectors with Conditions ```yaml -adjacency: - - source: - path: "/datacenter" - match: - logic: and # "and" or "or"; defaults vary by context (see below) - conditions: - - attr: role - operator: "==" - value: leaf - target: /spine - pattern: mesh +network: + links: + - source: + path: "/datacenter" + match: + logic: and # "and" or "or"; defaults vary by context (see below) + conditions: + - attr: role + op: "==" + value: leaf + target: /spine + pattern: mesh ``` -**Operators**: `==`, `!=`, `<`, `<=`, `>`, `>=`, `contains`, `not_contains`, `in`, `not_in`, `any_value`, `no_value` +**Operators**: `==`, `!=`, `<`, `<=`, `>`, `>=`, `contains`, `not_contains`, `in`, `not_in`, `exists`, `not_exists` -**Logic defaults by context**: +**Logic defaults by context (for `match.logic`)**: | Context | Default `logic` | Rationale | |---------|-----------------|-----------| -| Adjacency `match` | `"or"` | Inclusive: match any condition | +| Link `match` | `"or"` | Inclusive: match any condition | | Demand `match` | `"or"` | Inclusive: match any condition | | Membership rules | `"and"` | Precise: must match all conditions | | Failure rules | `"or"` | Inclusive: match any condition | @@ -205,13 +198,14 @@ source: "^(dc\\d+)/(spine|leaf)/.*" # Groups: dc1|spine, dc1|leaf, etc. ### Variable Expansion ```yaml -adjacency: +links: - source: "plane${p}/rack" target: "spine${s}" - expand_vars: - p: [1, 2] - s: [1, 2, 3] - expansion_mode: cartesian # or "zip" (equal-length lists required) + expand: + vars: + p: [1, 2] + s: [1, 2, 3] + mode: cartesian # or "zip" (equal-length lists required) pattern: mesh ``` @@ -220,38 +214,73 @@ adjacency: ```yaml blueprints: clos_pod: - groups: + nodes: leaf: - node_count: 4 - name_template: "leaf-{node_num}" + count: 4 + template: "leaf{n}" spine: - node_count: 2 - name_template: "spine-{node_num}" - adjacency: + count: 2 + template: "spine{n}" + links: - source: /leaf target: /spine pattern: mesh - link_params: - capacity: 100 + capacity: 100 network: - groups: + nodes: pod[1-2]: - use_blueprint: clos_pod - parameters: - leaf.node_count: 6 # Override defaults + blueprint: clos_pod + params: + leaf.count: 6 # Override defaults ``` +**Alternative: Inline nested nodes** (no blueprint needed): + +```yaml +network: + nodes: + datacenter: + nodes: # Inline hierarchy + rack1: + count: 2 + template: "srv{n}" +``` + +### Node and Link Rules + +Modify entities after creation with optional attribute filtering: + +```yaml +network: + node_rules: + - path: "^pod1/.*" + match: # Optional: filter by attributes + conditions: + - {attr: role, op: "==", value: compute} + disabled: true + + link_rules: + - source: "^pod1/.*" + target: "^pod2/.*" + link_match: # Optional: filter by link attributes + conditions: + - {attr: capacity, op: ">=", value: 400} + cost: 99 +``` + +Rules also support `expand` for variable-based application. 
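+
+For example (a sketch of the same mechanism shown in Example 20 of EXAMPLES.md; the variable values and attribute are illustrative):
+
+```yaml
+network:
+  node_rules:
+    - path: "${dc}/leaf"
+      expand:
+        vars:
+          dc: [pod1, pod2]
+        mode: cartesian
+      attrs:
+        maintenance_mode: active
+```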
+ ### Traffic Demands ```yaml -traffic_matrix_set: +demands: production: - source: "^dc1/.*" - sink: "^dc2/.*" - demand: 1000 + target: "^dc2/.*" + volume: 1000 mode: pairwise # or "combine" - flow_policy_config: SHORTEST_PATHS_ECMP + flow_policy: SHORTEST_PATHS_ECMP ``` **Flow policies**: `SHORTEST_PATHS_ECMP`, `SHORTEST_PATHS_WCMP`, `TE_WCMP_UNLIM`, `TE_ECMP_16_LSP`, `TE_ECMP_UP_TO_256_LSP` @@ -259,19 +288,19 @@ traffic_matrix_set: ### Failure Policies ```yaml -failure_policy_set: +failures: single_link: - fail_risk_groups: false # Expand to shared-risk entities - modes: # Weighted modes (one selected per iteration) + expand_groups: false # Expand to shared-risk entities + modes: # Weighted modes (one selected per iteration) - weight: 1.0 rules: - - entity_scope: link # node, link, or risk_group - rule_type: choice # all, choice, or random + - scope: link # Required: node, link, or risk_group + mode: choice # all, choice, or random count: 1 # Optional: weight_by: capacity # Weighted sampling by attribute ``` -**Rule types**: `all` (select all matches), `choice` (sample `count`), `random` (each with `probability`) +**Rule modes**: `all` (select all matches), `choice` (sample `count`), `random` (each with `probability`) ### Risk Groups @@ -285,29 +314,30 @@ risk_groups: - "RG2" # String shorthand (equivalent to {name: "RG2"}) ``` -**Membership rules** (assign entities by attribute matching): +**Membership rules** (assign entities by attribute matching; optional `path` filter): ```yaml risk_groups: - name: HighCapacityLinks membership: - entity_scope: link # node, link, or risk_group + scope: link # Required: node, link, or risk_group match: logic: and # "and" or "or" (default: "and" for membership) conditions: - attr: capacity - operator: ">=" + op: ">=" value: 1000 ``` -**Generate blocks** (create groups from unique attribute values): +**Generate blocks** (create groups from unique attribute values; optional `path` filter): ```yaml risk_groups: - generate: - entity_scope: node # node or link only + scope: node # Required: node or link only + path: "^prod_.*" # Optional: narrow entities before grouping group_by: region # Any attribute to group by - name_template: "Region_${value}" + name: "Region_${value}" ``` **Validation:** Risk group references are validated at load time (undefined references and circular hierarchies detected). @@ -318,23 +348,23 @@ See [REFERENCE.md](references/REFERENCE.md) for complete details. ```yaml workflow: - - step_type: NetworkStats + - type: NetworkStats name: stats - - step_type: MaximumSupportedDemand + - type: MaximumSupportedDemand name: msd - matrix_name: production + demand_set: production alpha_start: 1.0 resolution: 0.05 - - step_type: TrafficMatrixPlacement + - type: TrafficMatrixPlacement name: placement - matrix_name: production + demand_set: production failure_policy: single_link iterations: 1000 alpha_from_step: msd # Reference MSD result alpha_from_field: data.alpha_star - - step_type: MaxFlow + - type: MaxFlow source: "^(dc[1-3])$" - sink: "^(dc[1-3])$" + target: "^(dc[1-3])$" mode: pairwise failure_policy: single_link iterations: 1000 @@ -360,21 +390,21 @@ nodes: my_field: value ``` -### 2. Link parameters require `link_params` wrapper +### 2. Link properties are flattened (no wrapper) ```yaml # WRONG links: - source: A target: B - capacity: 100 # Error! + link_params: # Error! No wrapper + capacity: 100 # CORRECT links: - source: A target: B - link_params: - capacity: 100 + capacity: 100 # Direct property ``` ### 3. 
`one_to_one` requires compatible sizes @@ -404,26 +434,27 @@ source: "${dc}/leaf" ```yaml # WRONG -expand_vars: - a: [1, 2] - b: [x, y, z] # Length mismatch! -expansion_mode: zip +expand: + vars: + a: [1, 2] + b: [x, y, z] # Length mismatch! + mode: zip ``` ### 7. Processing order matters 1. Groups and direct nodes created -2. Node overrides applied -3. Adjacency and blueprint adjacencies expanded -4. Direct links created -5. Link overrides applied +2. Node rules applied +3. Blueprint links expanded +4. Top-level links expanded (including direct links) +5. Link rules applied -Overrides only affect entities that exist at their processing stage. +Rules only affect entities that exist at their processing stage. ## Validation Checklist - [ ] Custom fields inside `attrs` -- [ ] Link parameters inside `link_params` +- [ ] Link properties directly on link (no wrapper) - [ ] Referenced blueprints exist - [ ] Node names in direct links exist - [ ] `one_to_one` sizes have multiple factor @@ -433,4 +464,4 @@ Overrides only affect entities that exist at their processing stage. ## More Information - [Full DSL Reference](references/REFERENCE.md) - Complete field documentation, all operators, workflow steps -- [Working Examples](references/EXAMPLES.md) - 17 complete scenarios from simple to advanced +- [Working Examples](references/EXAMPLES.md) - 22 complete scenarios from simple to advanced diff --git a/.claude/skills/netgraph-dsl/references/EXAMPLES.md b/.claude/skills/netgraph-dsl/references/EXAMPLES.md index 58e34a9..18eb408 100644 --- a/.claude/skills/netgraph-dsl/references/EXAMPLES.md +++ b/.claude/skills/netgraph-dsl/references/EXAMPLES.md @@ -11,45 +11,44 @@ A basic leaf-spine topology with traffic analysis. ```yaml network: - groups: + nodes: leaf: - node_count: 4 - name_template: "leaf-{node_num}" + count: 4 + template: "leaf{n}" attrs: role: leaf spine: - node_count: 2 - name_template: "spine-{node_num}" + count: 2 + template: "spine{n}" attrs: role: spine - adjacency: + links: - source: /leaf target: /spine pattern: mesh - link_params: - capacity: 100 - cost: 1 + capacity: 100 + cost: 1 -traffic_matrix_set: +demands: default: - source: "^leaf/.*" - sink: "^leaf/.*" - demand: 50 + target: "^leaf/.*" + volume: 50 mode: pairwise -failure_policy_set: +failures: single_link: modes: - weight: 1.0 rules: - - entity_scope: link - rule_type: choice + - scope: link + mode: choice count: 1 workflow: - - step_type: TrafficMatrixPlacement + - type: TrafficMatrixPlacement name: placement - matrix_name: default + demand_set: default failure_policy: single_link iterations: 100 ``` @@ -63,42 +62,40 @@ Two pods sharing a blueprint, connected via spine layer. 
```yaml blueprints: clos_pod: - groups: + nodes: leaf: - node_count: 4 - name_template: "leaf-{node_num}" + count: 4 + template: "leaf{n}" attrs: role: leaf spine: - node_count: 2 - name_template: "spine-{node_num}" + count: 2 + template: "spine{n}" attrs: role: spine - adjacency: + links: - source: /leaf target: /spine pattern: mesh - link_params: - capacity: 100 + capacity: 100 network: - groups: + nodes: pod[1-2]: - use_blueprint: clos_pod + blueprint: clos_pod - adjacency: + links: - source: path: "pod1/spine" match: conditions: - attr: role - operator: "==" + op: "==" value: spine target: path: "pod2/spine" pattern: mesh - link_params: - capacity: 400 + capacity: 400 ``` **Result**: 12 nodes (2 pods x 6 nodes), 20 links (16 internal + 4 inter-pod) @@ -118,22 +115,19 @@ network: # Parallel diverse paths - source: NewYork target: Chicago - link_params: - capacity: 100 - cost: 10 - risk_groups: [RG_NY_CHI] + capacity: 100 + cost: 10 + risk_groups: [RG_NY_CHI] - source: NewYork target: Chicago - link_params: - capacity: 100 - cost: 10 + capacity: 100 + cost: 10 # Single path - source: Chicago target: LosAngeles - link_params: - capacity: 100 - cost: 15 - risk_groups: [RG_CHI_LA] + capacity: 100 + cost: 15 + risk_groups: [RG_CHI_LA] risk_groups: - name: RG_NY_CHI @@ -145,13 +139,13 @@ risk_groups: corridor: Chicago-LA distance_km: 2800 -failure_policy_set: +failures: srlg_failure: modes: - weight: 1.0 rules: - - entity_scope: risk_group - rule_type: choice + - scope: risk_group + mode: choice count: 1 ``` @@ -163,30 +157,30 @@ Large fabric using variable expansion. ```yaml network: - groups: + nodes: plane[1-4]/rack[1-8]: - node_count: 48 - name_template: "server-{node_num}" + count: 48 + template: "server{n}" attrs: role: compute fabric/spine[1-4]: - node_count: 1 - name_template: "spine" + count: 1 + template: "spine" attrs: role: spine - adjacency: + links: - source: "plane${p}/rack${r}" target: "fabric/spine${s}" - expand_vars: - p: [1, 2, 3, 4] - r: [1, 2, 3, 4, 5, 6, 7, 8] - s: [1, 2, 3, 4] - expansion_mode: cartesian + expand: + vars: + p: [1, 2, 3, 4] + r: [1, 2, 3, 4, 5, 6, 7, 8] + s: [1, 2, 3, 4] + mode: cartesian pattern: mesh - link_params: - capacity: 100 + capacity: 100 ``` **Result**: 1540 nodes (4x8x48 compute + 4 spine), 6144 links @@ -208,51 +202,57 @@ network: links: - source: N1 target: N2 - link_params: {capacity: 2.0, cost: 1.0} + capacity: 2.0 + cost: 1.0 - source: N1 target: N3 - link_params: {capacity: 1.0, cost: 1.0} + capacity: 1.0 + cost: 1.0 - source: N1 target: N4 - link_params: {capacity: 2.0, cost: 1.0} + capacity: 2.0 + cost: 1.0 - source: N2 target: N3 - link_params: {capacity: 2.0, cost: 1.0} + capacity: 2.0 + cost: 1.0 - source: N2 target: N4 - link_params: {capacity: 1.0, cost: 1.0} + capacity: 1.0 + cost: 1.0 - source: N3 target: N4 - link_params: {capacity: 2.0, cost: 1.0} + capacity: 2.0 + cost: 1.0 -failure_policy_set: +failures: single_link_failure: modes: - weight: 1.0 rules: - - entity_scope: link - rule_type: choice + - scope: link + mode: choice count: 1 -traffic_matrix_set: +demands: baseline: - source: "^N([1-4])$" - sink: "^N([1-4])$" - demand: 12.0 + target: "^N([1-4])$" + volume: 12.0 mode: pairwise workflow: - - step_type: MaximumSupportedDemand + - type: MaximumSupportedDemand name: msd - matrix_name: baseline + demand_set: baseline acceptance_rule: hard alpha_start: 1.0 resolution: 0.05 - - step_type: MaxFlow + - type: MaxFlow name: capacity_matrix source: "^(N[1-4])$" - sink: "^(N[1-4])$" + target: "^(N[1-4])$" mode: pairwise 
failure_policy: single_link_failure iterations: 1000 @@ -265,48 +265,47 @@ Using match conditions to filter nodes. ```yaml network: - groups: + nodes: servers: - node_count: 4 - name_template: "srv-{node_num}" + count: 4 + template: "srv{n}" attrs: role: compute rack: "rack-1" servers_b: - node_count: 2 - name_template: "srvb-{node_num}" + count: 2 + template: "srvb{n}" attrs: role: compute rack: "rack-9" switches: - node_count: 2 - name_template: "sw-{node_num}" + count: 2 + template: "sw{n}" attrs: tier: spine - adjacency: + links: - source: path: "/servers" match: logic: and conditions: - attr: role - operator: "==" + op: "==" value: compute - attr: rack - operator: "!=" + op: "!=" value: "rack-9" target: path: "/switches" match: conditions: - attr: tier - operator: "==" + op: "==" value: spine pattern: mesh - link_params: - capacity: 10 - cost: 1 + capacity: 10 + cost: 1 ``` **Result**: 8 nodes, 8 links (only rack-1 servers connect to switches) @@ -318,67 +317,65 @@ Customizing blueprint instances. ```yaml blueprints: bp1: - groups: + nodes: leaf: - node_count: 1 + count: 1 attrs: some_field: nested_key: 111 network: - groups: + nodes: Main: - use_blueprint: bp1 - parameters: + blueprint: bp1 + params: leaf.attrs.some_field.nested_key: 999 ``` -**Result**: Node `Main/leaf/leaf-1` has `attrs.some_field.nested_key = 999` +**Result**: Node `Main/leaf/leaf1` has `attrs.some_field.nested_key = 999` -## Example 8: Node and Link Overrides +## Example 8: Node and Link Rules Modifying topology after creation. ```yaml blueprints: test_bp: - groups: + nodes: switches: - node_count: 3 - name_template: "switch-{node_num}" + count: 3 + template: "switch{n}" network: - groups: + nodes: group1: - node_count: 2 - name_template: "node-{node_num}" + count: 2 + template: "node{n}" group2: - node_count: 2 - name_template: "node-{node_num}" + count: 2 + template: "node{n}" my_clos1: - use_blueprint: test_bp + blueprint: test_bp - adjacency: + links: - source: /group1 target: /group2 pattern: mesh - link_params: - capacity: 100 - cost: 10 + capacity: 100 + cost: 10 - node_overrides: - - path: "^my_clos1/switches/switch-(1|3)$" + node_rules: + - path: "^my_clos1/switches/switch(1|3)$" disabled: true attrs: maintenance_mode: active hw_type: newer_model - link_overrides: - - source: "^group1/node-1$" - target: "^group2/node-1$" - link_params: - capacity: 200 - cost: 5 + link_rules: + - source: "^group1/node1$" + target: "^group2/node1$" + capacity: 200 + cost: 5 ``` **Result**: Switches 1 and 3 disabled, specific link upgraded to 200 capacity @@ -392,66 +389,65 @@ seed: 42 blueprints: Clos_L16_S4: - groups: + nodes: spine: - node_count: 4 - name_template: spine{node_num} + count: 4 + template: spine{n} attrs: role: spine leaf: - node_count: 16 - name_template: leaf{node_num} + count: 16 + template: leaf{n} attrs: role: leaf - adjacency: + links: - source: /leaf target: /spine pattern: mesh - link_params: - capacity: 3200 - cost: 1 + capacity: 3200 + cost: 1 network: - groups: + nodes: metro1/pop[1-2]: - use_blueprint: Clos_L16_S4 + blueprint: Clos_L16_S4 attrs: metro_name: new-york node_type: pop -traffic_matrix_set: +demands: baseline: - source: "^metro1/pop1/.*" - sink: "^metro1/pop2/.*" - demand: 15000.0 + target: "^metro1/pop2/.*" + volume: 15000.0 mode: pairwise - flow_policy_config: TE_WCMP_UNLIM + flow_policy: TE_WCMP_UNLIM -failure_policy_set: +failures: single_link: modes: - weight: 1.0 rules: - - entity_scope: link - rule_type: choice + - scope: link + mode: choice count: 1 workflow: - - step_type: 
NetworkStats + - type: NetworkStats name: network_statistics - - step_type: MaximumSupportedDemand + - type: MaximumSupportedDemand name: msd_baseline - matrix_name: baseline + demand_set: baseline acceptance_rule: hard alpha_start: 1.0 growth_factor: 2.0 resolution: 0.05 - - step_type: TrafficMatrixPlacement + - type: TrafficMatrixPlacement name: tm_placement seed: 42 - matrix_name: baseline + demand_set: baseline failure_policy: single_link iterations: 1000 parallelism: 7 @@ -474,18 +470,18 @@ network: links: - source: dc1_srv1 target: dc2_srv1 - link_params: {capacity: 100} + capacity: 100 - source: dc1_srv2 target: dc2_srv2 - link_params: {capacity: 100} + capacity: 100 -traffic_matrix_set: +demands: inter_dc: - source: group_by: dc - sink: + target: group_by: dc - demand: 100 + volume: 100 mode: pairwise ``` @@ -506,16 +502,20 @@ network: links: - source: core1 target: core2 - link_params: {capacity: 1000, risk_groups: [RG_core]} + capacity: 1000 + risk_groups: [RG_core] - source: core1 target: edge1 - link_params: {capacity: 400, risk_groups: [RG_west]} + capacity: 400 + risk_groups: [RG_west] - source: core1 target: edge3 - link_params: {capacity: 200, risk_groups: [RG_west]} + capacity: 200 + risk_groups: [RG_west] - source: core2 target: edge2 - link_params: {capacity: 400, risk_groups: [RG_east]} + capacity: 400 + risk_groups: [RG_east] risk_groups: - name: RG_core @@ -525,31 +525,32 @@ risk_groups: - name: RG_east attrs: {tier: edge, distance_km: 800} -failure_policy_set: +failures: mixed_failures: - fail_risk_groups: true # Expand to shared-risk entities - fail_risk_group_children: false + expand_groups: true # Expand to shared-risk entities + expand_children: false modes: # 40% chance: fail 1 edge node weighted by capacity - weight: 0.4 attrs: {scenario: edge_failure} rules: - - entity_scope: node - rule_type: choice + - scope: node + mode: choice count: 1 - conditions: - - attr: role - operator: "==" - value: edge - logic: and + match: + logic: and + conditions: + - attr: role + op: "==" + value: edge weight_by: capacity_gbps # 35% chance: fail 1 risk group weighted by distance - weight: 0.35 attrs: {scenario: srlg_failure} rules: - - entity_scope: risk_group - rule_type: choice + - scope: risk_group + mode: choice count: 1 weight_by: distance_km @@ -557,26 +558,27 @@ failure_policy_set: - weight: 0.15 attrs: {scenario: regional_outage} rules: - - entity_scope: node - rule_type: all - conditions: - - attr: region - operator: "==" - value: west + - scope: node + mode: all + match: + conditions: + - attr: region + op: "==" + value: west # 10% chance: random link failures (5% each) - weight: 0.1 attrs: {scenario: random_link} rules: - - entity_scope: link - rule_type: random + - scope: link + mode: random probability: 0.05 workflow: - - step_type: MaxFlow + - type: MaxFlow name: failure_analysis source: "^(edge[1-3])$" - sink: "^(edge[1-3])$" + target: "^(edge[1-3])$" mode: pairwise failure_policy: mixed_failures iterations: 1000 @@ -620,45 +622,44 @@ network: name: "datacenter-fabric" version: "2.0" - groups: + nodes: spine: - node_count: 2 - name_template: "spine-{node_num}" + count: 2 + template: "spine{n}" attrs: hardware: component: SpineRouter count: 1 leaf: - node_count: 4 - name_template: "leaf-{node_num}" + count: 4 + template: "leaf{n}" attrs: hardware: component: LeafRouter count: 1 - adjacency: + links: - source: /leaf target: /spine pattern: mesh - link_count: 2 # 2 parallel links per pair - link_params: - capacity: 800 - cost: 1 - attrs: - hardware: - source: - 
component: Optic400G - count: 2 - target: - component: Optic400G - count: 2 - exclusive: true # Dedicated optics (rounds up count) + count: 2 # 2 parallel links per pair + capacity: 800 + cost: 1 + attrs: + hardware: + source: + component: Optic400G + count: 2 + target: + component: Optic400G + count: 2 + exclusive: true # Dedicated optics (rounds up count) workflow: - - step_type: NetworkStats + - type: NetworkStats name: stats - - step_type: CostPower + - type: CostPower name: cost_analysis include_disabled: false aggregation_level: 1 # Aggregate by top-level group @@ -683,67 +684,65 @@ vars: tier: 1 network: - groups: + nodes: spine: - node_count: 2 - name_template: "spine-{node_num}" + count: 2 + template: "spine{n}" attrs: <<: *spine_attrs # Merge anchor region: east leaf: - node_count: 4 - name_template: "leaf-{node_num}" + count: 4 + template: "leaf{n}" attrs: <<: *leaf_attrs region: east - adjacency: + links: - source: /leaf target: /spine pattern: mesh - link_params: - <<: *link_cfg # Reuse link config - attrs: - link_type: fabric + <<: *link_cfg # Reuse link config + attrs: + link_type: fabric ``` **Result**: Anchors resolved during YAML parsing; cleaner, less repetitive config -## Example 14: One-to-One Adjacency and Zip Expansion +## Example 14: One-to-One Pattern and Zip Expansion Demonstrating pairwise connectivity patterns. ```yaml network: - groups: + nodes: # 4 servers, 2 switches - compatible for one_to_one (4 is multiple of 2) server[1-4]: - node_count: 1 - name_template: "srv" + count: 1 + template: "srv" switch[1-2]: - node_count: 1 - name_template: "sw" + count: 1 + template: "sw" - adjacency: + links: # one_to_one: server1->switch1, server2->switch2, server3->switch1, server4->switch2 - source: /server target: /switch pattern: one_to_one - link_params: - capacity: 100 + capacity: 100 # zip expansion: pairs variables by index (equal-length lists required) - source: "server${idx}" target: "switch${sw}" - expand_vars: - idx: [1, 2] - sw: [1, 2] - expansion_mode: zip # server1->switch1, server2->switch2 + expand: + vars: + idx: [1, 2] + sw: [1, 2] + mode: zip # server1->switch1, server2->switch2 pattern: one_to_one - link_params: - capacity: 50 - cost: 2 + capacity: 50 + cost: 2 ``` **Result**: Demonstrates one_to_one modulo wrap and zip expansion mode @@ -761,33 +760,34 @@ network: dc2_leaf2: {attrs: {dc: dc2, role: leaf}} dc3_leaf1: {attrs: {dc: dc3, role: leaf}} links: - - {source: dc1_leaf1, target: dc2_leaf1, link_params: {capacity: 100}} - - {source: dc1_leaf2, target: dc2_leaf2, link_params: {capacity: 100}} - - {source: dc2_leaf1, target: dc3_leaf1, link_params: {capacity: 100}} + - {source: dc1_leaf1, target: dc2_leaf1, capacity: 100} + - {source: dc1_leaf2, target: dc2_leaf2, capacity: 100} + - {source: dc2_leaf1, target: dc3_leaf1, capacity: 100} -traffic_matrix_set: +demands: # Variable expansion in demands inter_dc: - source: "^${src}/.*" - sink: "^${dst}/.*" - demand: 50 - expand_vars: - src: [dc1, dc2] - dst: [dc2, dc3] - expansion_mode: zip # dc1->dc2, dc2->dc3 + target: "^${dst}/.*" + volume: 50 + expand: + vars: + src: [dc1, dc2] + dst: [dc2, dc3] + mode: zip # dc1->dc2, dc2->dc3 # Group modes with group_by grouped: - source: group_by: dc - sink: + target: group_by: dc - demand: 100 + volume: 100 mode: pairwise group_mode: per_group # Separate demand per group pair priority: 1 demand_placed: 10.0 # 10 units pre-placed - flow_policy_config: SHORTEST_PATHS_WCMP + flow_policy: SHORTEST_PATHS_WCMP ``` **Result**: Shows variable expansion in demands, 
group_mode, priority, demand_placed @@ -804,9 +804,9 @@ network: rack1_srv3: {risk_groups: [Rack1_Card2]} rack2_srv1: {risk_groups: [Rack2]} links: - - {source: rack1_srv1, target: rack2_srv1, link_params: {capacity: 100}} - - {source: rack1_srv2, target: rack2_srv1, link_params: {capacity: 100}} - - {source: rack1_srv3, target: rack2_srv1, link_params: {capacity: 100}} + - {source: rack1_srv1, target: rack2_srv1, capacity: 100} + - {source: rack1_srv2, target: rack2_srv1, capacity: 100} + - {source: rack1_srv3, target: rack2_srv1, capacity: 100} risk_groups: - name: Rack1 @@ -820,20 +820,21 @@ risk_groups: disabled: false attrs: {location: "DC1-Row2"} -failure_policy_set: +failures: hierarchical: - fail_risk_groups: true - fail_risk_group_children: true # Failing Rack1 also fails Card1, Card2 + expand_groups: true + expand_children: true # Failing Rack1 also fails Card1, Card2 modes: - weight: 1.0 rules: - - entity_scope: risk_group - rule_type: choice + - scope: risk_group + mode: choice count: 1 - conditions: - - attr: location - operator: contains # String contains - value: "DC1" + match: + conditions: + - attr: location + op: contains # String contains + value: "DC1" ``` **Result**: Hierarchical risk groups with recursive child failure expansion @@ -852,39 +853,38 @@ network: links: - source: core1 target: core2 - link_params: - capacity: 1000 - attrs: - route_type: backbone - path_id: primary + capacity: 1000 + attrs: + route_type: backbone + path_id: primary - source: core1 target: edge1 - link_params: {capacity: 400} + capacity: 400 risk_groups: # Assign all core tier-3 nodes - name: CoreTier3 membership: - entity_scope: node + scope: node match: logic: and # Must match ALL conditions conditions: - attr: role - operator: "==" + op: "==" value: core - attr: tier - operator: "==" + op: "==" value: 3 # Assign links by route type - name: BackboneLinks membership: - entity_scope: link + scope: link match: logic: and conditions: - attr: route_type # Dot-notation for nested attrs - operator: "==" + op: "==" value: backbone # String shorthand for simple groups @@ -906,38 +906,36 @@ network: links: - source: srv1 target: srv2 - link_params: - capacity: 100 - attrs: - connection_type: intra_dc + capacity: 100 + attrs: + connection_type: intra_dc - source: srv2 target: srv3 - link_params: - capacity: 100 - attrs: - connection_type: inter_dc + capacity: 100 + attrs: + connection_type: inter_dc risk_groups: # Generate risk group per datacenter (from nodes) - generate: - entity_scope: node + scope: node group_by: datacenter - name_template: "DC_${value}" + name: "DC_${value}" attrs: generated: true type: location # Generate risk group per rack (from nodes) - generate: - entity_scope: node + scope: node group_by: rack - name_template: "Rack_${value}" + name: "Rack_${value}" # Generate risk group per connection type (from links) - generate: - entity_scope: link + scope: link group_by: connection_type - name_template: "Links_${value}" + name: "Links_${value}" ``` **Result**: Creates 6 risk groups: @@ -961,26 +959,26 @@ network: srv3: {attrs: {tier: 3, tags: [dev], region: west}} srv4: {attrs: {tier: 2}} links: - - {source: srv1, target: srv2, link_params: {capacity: 100}} - - {source: srv2, target: srv3, link_params: {capacity: 100}} - - {source: srv3, target: srv4, link_params: {capacity: 100}} + - {source: srv1, target: srv2, capacity: 100} + - {source: srv2, target: srv3, capacity: 100} + - {source: srv3, target: srv4, capacity: 100} -traffic_matrix_set: +demands: filtered: # Tier comparison 
operators - source: match: conditions: - attr: tier - operator: ">=" + op: ">=" value: 2 - sink: + target: match: conditions: - attr: tier - operator: "<" + op: "<" value: 3 - demand: 50 + volume: 50 mode: pairwise # List membership operators @@ -988,15 +986,15 @@ traffic_matrix_set: match: conditions: - attr: region - operator: in + op: in value: [east, west] - sink: + target: match: conditions: - attr: tags - operator: contains # List contains value + op: contains # List contains value value: prod - demand: 25 + volume: 25 mode: combine # Existence operators @@ -1004,14 +1002,188 @@ traffic_matrix_set: match: conditions: - attr: region - operator: any_value # Attribute exists and not null - sink: + op: exists # Attribute exists and not null + target: match: conditions: - attr: region - operator: no_value # Attribute missing or null - demand: 10 + op: not_exists # Attribute missing or null + volume: 10 + mode: pairwise +``` + +**Result**: Demonstrates `>=`, `<`, `in`, `contains`, `exists`, `not_exists` operators + +## Example 20: link_match and Rule Expansion + +Using `link_match` to filter link rules by the link's own attributes, and `expand` for variable-based rule application. + +```yaml +network: + nodes: + dc1_srv: {} + dc2_srv: {} + dc3_srv: {} + links: + - {source: dc1_srv, target: dc2_srv, capacity: 100, cost: 1, attrs: {type: fiber}} + - {source: dc1_srv, target: dc2_srv, capacity: 500, cost: 1, attrs: {type: fiber}} + - {source: dc2_srv, target: dc3_srv, capacity: 500, cost: 1, attrs: {type: copper}} + + # Update only high-capacity fiber links + link_rules: + - source: ".*" + target: ".*" + link_match: + logic: and + conditions: + - {attr: capacity, op: ">=", value: 400} + - {attr: type, op: "==", value: fiber} + cost: 99 + attrs: + priority: high + + # Apply node rules using variable expansion + node_rules: + - path: "${dc}_srv" + expand: + vars: + dc: [dc1, dc2] + mode: cartesian + attrs: + tagged: true +``` + +**Result**: Only the 500-capacity fiber link (dc1_srv -> dc2_srv) gets cost 99. Nodes dc1_srv and dc2_srv are tagged. + +## Example 21: Nested Inline Nodes (No Blueprint) + +Creating hierarchical topology structure without using blueprints. + +```yaml +network: + nodes: + datacenter: + attrs: + region: west + tier: 1 + nodes: + rack1: + attrs: + rack_id: 1 + nodes: + tor: + count: 1 + template: "sw{n}" + attrs: + role: switch + servers: + count: 4 + template: "srv{n}" + attrs: + role: compute + rack2: + attrs: + rack_id: 2 + nodes: + tor: + count: 1 + template: "sw{n}" + attrs: + role: switch + servers: + count: 4 + template: "srv{n}" + attrs: + role: compute + + links: + # Connect servers to their TOR switch in each rack + - source: + path: "datacenter/rack1/servers" + target: + path: "datacenter/rack1/tor" + pattern: mesh + capacity: 25 + - source: + path: "datacenter/rack2/servers" + target: + path: "datacenter/rack2/tor" + pattern: mesh + capacity: 25 + # Connect TOR switches + - source: datacenter/rack1/tor/sw1 + target: datacenter/rack2/tor/sw1 + capacity: 100 +``` + +**Result**: Creates 10 nodes (2 switches + 8 servers) in a two-rack hierarchy. All nodes inherit `region: west` and `tier: 1` from the datacenter parent. Each rack's nodes get the appropriate `rack_id`. + +## Example 22: path Filter in Generate Blocks + +Using `path` to narrow entities before generating risk groups. 
+ +```yaml +network: + nodes: + prod_web1: {attrs: {env: production, service: web}} + prod_web2: {attrs: {env: production, service: web}} + prod_db1: {attrs: {env: production, service: database}} + dev_web1: {attrs: {env: development, service: web}} + dev_db1: {attrs: {env: development, service: database}} + links: + - {source: prod_web1, target: prod_db1, capacity: 100, attrs: {link_type: internal}} + - {source: prod_web2, target: prod_db1, capacity: 100, attrs: {link_type: internal}} + - {source: dev_web1, target: dev_db1, capacity: 50, attrs: {link_type: internal}} + +risk_groups: + # Generate env-based risk groups only for production nodes + - generate: + scope: node + path: "^prod_.*" + group_by: env + name: "Env_${value}" + attrs: + generated: true + critical: true + + # Generate service-based risk groups for all nodes + - generate: + scope: node + group_by: service + name: "Service_${value}" + + # Generate link risk groups only for production links + - generate: + scope: link + path: ".*prod.*" + group_by: link_type + name: "ProdLinks_${value}" + +demands: + baseline: + - source: "^prod_web.*" + target: "^prod_db.*" + volume: 50 mode: pairwise + flow_policy: SHORTEST_PATHS_ECMP + +failures: + production_failure: + expand_groups: true + modes: + - weight: 1.0 + rules: + - scope: risk_group + path: "^Env_.*" + mode: choice + count: 1 ``` -**Result**: Demonstrates `>=`, `<`, `in`, `contains`, `any_value`, `no_value` operators +**Result**: Creates the following risk groups: + +- `Env_production` (only production nodes due to path filter) +- `Service_web` (prod_web1, prod_web2, dev_web1) +- `Service_database` (prod_db1, dev_db1) +- `ProdLinks_internal` (only production links due to path filter) + +Note: `Env_development` is NOT created because dev nodes don't match `^prod_.*`. diff --git a/.claude/skills/netgraph-dsl/references/REFERENCE.md b/.claude/skills/netgraph-dsl/references/REFERENCE.md index 03c3cec..2dbc878 100644 --- a/.claude/skills/netgraph-dsl/references/REFERENCE.md +++ b/.claude/skills/netgraph-dsl/references/REFERENCE.md @@ -14,23 +14,21 @@ NetGraph DSL uses three distinct template syntaxes in different contexts: | Syntax | Example | Where | Purpose | |--------|---------|-------|---------| | **Brackets** `[1-3]` | `dc[1-3]/rack[a,b]` | Group names, risk groups | Generate multiple entities | -| **Variables** `$var` | `pod${p}/leaf` | Adjacency, demands | Template expansion | -| **Format** `{node_num}` | `srv-{node_num}` | `name_template` | Node naming | +| **Variables** `$var` | `pod${p}/leaf` | Links, demands | Template expansion | +| **Format** `{n}` | `srv{n}` | `template` | Node naming | **Important**: These syntaxes are NOT interchangeable: - `[1-3]` works in group names and risk groups (definitions and memberships), not components -- `${var}` requires `expand_vars` dict; only works in adjacency `source`/`target` and demand `source`/`sink` -- `{node_num}` is the only placeholder available in `name_template` (Python format syntax) +- `${var}` requires `expand.vars` dict; only works in link `source`/`target` and demand `source`/`target` +- `{n}` is the only placeholder available in `template` (Python format syntax) ### Endpoint Naming Conventions | Context | Fields | Terminology | |---------|--------|-------------| -| Links, adjacency, link_overrides | `source`, `target` | Graph edge | -| Traffic demands, workflow steps | `source`, `sink` | Max-flow | - -**Why different?** Links use graph terminology (`target` = edge destination). 
Traffic demands and analysis use max-flow terminology (`sink` = flow destination). +| Links, link_rules | `source`, `target` | Graph edge | +| Traffic demands, workflow steps | `source`, `target` | Max-flow | ### Expansion Controls in Traffic Demands @@ -38,9 +36,9 @@ Traffic demands have three expansion-related fields: | Field | Values | Default | Purpose | |-------|--------|---------|---------| -| `mode` | `combine`, `pairwise` | `combine` | How source/sink nodes pair | +| `mode` | `combine`, `pairwise` | `combine` | How source/target nodes pair | | `group_mode` | `flatten`, `per_group`, `group_pairwise` | `flatten` | How grouped nodes expand | -| `expansion_mode` | `cartesian`, `zip` | `cartesian` | How `expand_vars` combine | +| `expand.mode` | `cartesian`, `zip` | `cartesian` | How `expand.vars` combine | See detailed sections below for each mechanism. @@ -52,7 +50,7 @@ The DSL implements two fundamentally different selection patterns optimized for The DSL uses distinct selection strategies depending on the operation: -**1. Path-Based Node Selection** (adjacency rules, traffic demands, workflow steps) +**1. Path-Based Node Selection** (link rules, traffic demands, workflow steps) - Uses regex patterns on hierarchical node names - Supports capture group-based grouping @@ -62,19 +60,19 @@ The DSL uses distinct selection strategies depending on the operation: **2. Condition-Based Entity Selection** (failure rules, membership rules, risk group generation) -- Works on nodes, links, or risk_groups (`entity_scope`) -- Uses only attribute-based filtering (`conditions`) -- No path/regex patterns (operates on all entities of specified type) +- Works on nodes/links; failure + membership can also target risk_groups +- Optional regex `path` filter on entity names/IDs (no capture grouping) +- Attribute filtering via `match.conditions` for failure/membership; generate uses `group_by` only These patterns share common primitives (condition evaluation, match specification) but serve different purposes and should not be confused. -### Adjacency Creation Flow +### Link Creation Flow -Adjacency rules create links between nodes using path-based selection with optional filtering: +Link definitions create links between nodes using path-based selection with optional filtering: ```mermaid flowchart TD - Start[Adjacency Definition] --> VarExpand{Has expand_vars?} + Start[Link Definition] --> VarExpand{Has expand.vars?} VarExpand -->|Yes| VarSubst[Variable Substitution] VarSubst --> PathFilter VarExpand -->|No| PathFilter[1. Path-Based Selection] @@ -100,7 +98,7 @@ flowchart TD - Uses `logic: "and"` or `"or"` (default: `"or"`) - Supports operators: `==`, `!=`, `<`, `>`, `contains`, `in`, etc. 3. **Active Filtering**: Filters disabled nodes based on context - - Adjacency default: `active_only=false` (creates links to disabled nodes) + - Link default: `active_only=false` (creates links to disabled nodes) 4. **Attribute Grouping**: Optional `group_by` overrides regex capture grouping 5. 
**Pattern Application**: Creates links between selected node groups
   - `mesh`: Every source to every target
@@ -110,7 +108,7 @@ flowchart TD

- `default_active_only=False` (links are created to disabled nodes)
- `match.logic` defaults to `"or"` (inclusive matching)
-- Supports variable expansion via `expand_vars`
+- Supports variable expansion via `expand.vars`

### Traffic Demand Creation Flow

@@ -118,33 +116,33 @@ Traffic demands follow a similar pattern but with important differences:

```mermaid
flowchart TD
-    Start[Traffic Demand Spec] --> VarExpand{Has expand_vars?}
+    Start[Traffic Demand Spec] --> VarExpand{Has expand.vars?}
    VarExpand -->|Yes| VarSubst[Variable Substitution<br/>Creates multiple demand specs]
    VarSubst --> Process
    VarExpand -->|No| Process[Process Single Demand]
    Process --> SrcSelect[1. Select Source Nodes]
-    SrcSelect --> SinkSelect[2. Select Sink Nodes]
-    SinkSelect --> SrcDesc[Uses same path + match + group_by<br/>selection as adjacency]
+    SrcSelect --> TgtSelect[2. Select Target Nodes]
+    TgtSelect --> SrcDesc[Uses same path + match + group_by<br/>selection as links]
    SrcDesc --> Mode{Demand Mode?}
    Mode -->|pairwise| Pairwise[3a. Pairwise Expansion]
    Mode -->|combine| Combine[3b. Combine Expansion]
    Pairwise --> PairDesc[Create demand for each src-dst pair<br/>Volume distributed evenly<br/>No pseudo nodes]
-    Combine --> CombDesc[Create pseudo-source and pseudo-sink<br/>Single aggregated demand<br/>Augmentation edges connect real nodes]
+    Combine --> CombDesc[Create pseudo-source and pseudo-target<br/>Single aggregated demand<br/>Augmentation edges connect real nodes]
```

**Key Differences from Links:**

1. **Active-only default**: `default_active_only=True` (only active nodes participate)
-2. **Two selection phases**: Source nodes first, then sink nodes (both use same selector logic)
+2. **Two selection phases**: Source nodes first, then target nodes (both use same selector logic)
3. **Expansion modes**:
-   - **Pairwise**: Creates individual demands for each (source, sink) pair
+   - **Pairwise**: Creates individual demands for each (source, target) pair
   - **Combine**: Creates pseudo nodes and a single aggregated demand
4. **Group modes**: Additional layer (`flatten`, `per_group`, `group_pairwise`) for handling grouped selections

**Processing Steps:**

1. Select source nodes using unified selector (path + match + group_by)
-2. Select sink nodes using unified selector
+2. Select target nodes using unified selector
3. Apply mode-specific expansion:
   - **Pairwise**: Volume evenly distributed across all pairs
   - **Combine**: Single demand with pseudo nodes for aggregation
@@ -162,11 +160,11 @@ flowchart TD
    Direct --> DirectDesc[Simply name the risk group<br/>Entities reference it explicitly]

-    Member --> MemberScope[Specify entity_scope<br/>node, link, or risk_group]
-    MemberScope --> MemberCond[Define match conditions<br/>logic defaults to and]
-    MemberCond --> MemberExec[Scan ALL entities of that scope<br/>Add matching entities to risk group]
+    Member --> MemberScope[Specify scope<br/>node, link, or risk_group]
+    MemberScope --> MemberCond[Optional path filter<br/>Match conditions (logic defaults to and)]
+    MemberCond --> MemberExec[Scan entities of that scope<br/>Apply path + match]

-    Generate --> GenScope[Specify entity_scope<br/>node or link only]
+    Generate --> GenScope[Specify scope<br/>node or link only]
    GenScope --> GenGroupBy[Specify group_by attribute]
    GenGroupBy --> GenExec[Collect unique values<br/>Create risk group for each value<br/>Add entities with that value]
```
@@ -179,24 +177,24 @@ flowchart TD

**Key Characteristics:**

-- **No path patterns**: Operates on ALL entities of specified scope
-- **Only attribute-based**: Uses `conditions` exclusively
-- **Logic defaults to "and"** for membership (stricter matching)
+- **Optional path filter**: Regex `path` narrows entities before matching
+- **Membership uses `match.conditions`**; generate uses `group_by` only
+- **`match.logic` defaults to "and"** for membership (stricter matching)
- **Hierarchical support**: Risk groups can contain other risk groups as children

### Comparison Table

-| Feature | Adjacency | Traffic Demands | Risk Groups |
-|---------|-----------|----------------|-------------|
+| Feature | Links | Traffic Demands | Risk Groups |
+|---------|-------|----------------|-------------|
| Selection Type | Path-based | Path-based | Condition-based |
-| Regex Patterns | Yes | Yes | No |
+| Regex Patterns | Yes | Yes | Yes (path filter) |
| Capture Groups | Yes | Yes | No |
| `group_by` | Yes | Yes | Yes (generate only) |
-| `match` Conditions | Yes | Yes | Yes (membership/generate) |
+| `match` Conditions | Yes | Yes | Yes (membership/failure) |
| `active_only` Default | False | True | N/A |
| `match.logic` Default | "or" | "or" | "and" (membership) |
| Variable Expansion | Yes | Yes | No |
-| Entity Scope | Nodes only | Nodes only | Nodes, links, risk_groups |
+| Entity Scope | Nodes only | Nodes only | Nodes, links, risk_groups (generate: node/link) |

### Shared Evaluation Primitives

All selection mechanisms share common evaluation primitives:

1. **Condition evaluation**: `evaluate_condition()` handles all operators
   - Comparison: `==`, `!=`, `<`, `<=`, `>`, `>=`
   - String/collection: `contains`, `not_contains`, `in`, `not_in`
-   - Existence: `any_value`, `no_value`
+   - Existence: `exists`, `not_exists`

2. 
**Condition combining**: `evaluate_conditions()` applies `"and"`/`"or"` logic @@ -226,9 +224,9 @@ The DSL uses context-aware defaults to optimize for common use cases: | Context | Selection Type | Active Only | Match Logic | Rationale | |---------|---------------|-------------|-------------|-----------| -| Adjacency | Path-based | False | "or" | Create links to all nodes, including disabled | +| Links | Path-based | False | "or" | Create links to all nodes, including disabled | | Demands | Path-based | True | "or" | Only route traffic through active nodes | -| Node Overrides | Path-based | False | "or" | Modify all matching nodes | +| Node Rules | Path-based | False | "or" | Modify all matching nodes | | Workflow Steps | Path-based | True | "or" | Analyze only active topology | | Membership Rules | Condition-based | N/A | "and" | Precise matching for risk assignment | | Failure Rules | Condition-based | N/A | "or" | Inclusive matching for failure scenarios | @@ -240,13 +238,13 @@ These defaults ensure intuitive behavior while remaining overridable when needed | Key | Required | Purpose | |-----|----------|---------| -| `network` | Yes | Network topology (nodes, links, groups, adjacency) | +| `network` | Yes | Network topology (nodes, links) | | `blueprints` | No | Reusable topology templates | | `components` | No | Hardware component library | | `risk_groups` | No | Failure correlation groups | | `vars` | No | YAML anchors for value reuse | -| `traffic_matrix_set` | No | Traffic demand definitions | -| `failure_policy_set` | No | Failure simulation policies | +| `demands` | No | Traffic demand definitions | +| `failures` | No | Failure simulation policies | | `workflow` | No | Analysis execution steps | | `seed` | No | Master seed for reproducible random operations | @@ -278,7 +276,7 @@ network: count: 1 ``` -**Allowed node keys**: `disabled`, `attrs`, `risk_groups` +**Allowed node keys**: `disabled`, `attrs`, `risk_groups`, `count`, `template`, `blueprint`, `params`, `nodes` ### Direct Link Definitions @@ -287,50 +285,79 @@ network: links: - source: Seattle target: Portland - link_params: # Required wrapper - capacity: 100.0 - cost: 10 - disabled: false - risk_groups: ["RG_Seattle_Portland"] - attrs: - distance_km: 280 - media_type: fiber - hardware: - source: - component: "800G-ZR+" - count: 1 - exclusive: false # Optional: unsharable usage (rounds up) - target: - component: "800G-ZR+" - count: 1 - link_count: 2 # Optional: parallel links + capacity: 100.0 # Direct property + cost: 10 + disabled: false + risk_groups: ["RG_Seattle_Portland"] + attrs: + distance_km: 280 + media_type: fiber + hardware: + source: + component: "800G-ZR+" + count: 1 + exclusive: false # Optional: unsharable usage (rounds up) + target: + component: "800G-ZR+" + count: 1 + count: 2 # Optional: parallel links ``` -**Allowed link keys**: `source`, `target`, `link_params`, `link_count` - -**Allowed link_params keys**: `capacity`, `cost`, `disabled`, `risk_groups`, `attrs` +**Allowed link keys**: `source`, `target`, `pattern`, `count`, `capacity`, `cost`, `disabled`, `risk_groups`, `attrs`, `expand` **Link hardware per-end fields**: `component`, `count`, `exclusive` ### Node Groups -Groups create multiple nodes from a template: +Groups create multiple nodes from a template (distinguished by having `count` field): ```yaml network: - groups: + nodes: servers: - node_count: 4 - name_template: "srv-{node_num}" + count: 4 + template: "srv{n}" disabled: false risk_groups: ["RG_Servers"] attrs: role: compute ``` -Creates: 
`servers/srv-1`, `servers/srv-2`, `servers/srv-3`, `servers/srv-4` +Creates: `servers/srv1`, `servers/srv2`, `servers/srv3`, `servers/srv4` + +**Group-specific keys**: `count`, `template` + +### Nested Inline Nodes -**Allowed group keys**: `node_count`, `name_template`, `attrs`, `disabled`, `risk_groups` +Create hierarchical structures without blueprints using inline `nodes`: + +```yaml +network: + nodes: + datacenter: + attrs: + region: west + nodes: + rack1: + count: 2 + template: "srv{n}" + attrs: + role: compute + rack2: + count: 2 + template: "srv{n}" +``` + +Creates: `datacenter/rack1/srv1`, `datacenter/rack1/srv2`, `datacenter/rack2/srv1`, `datacenter/rack2/srv2` + +**Key points:** + +- Child nodes inherit parent `attrs`, `disabled`, and `risk_groups` +- Child-specific values override inherited ones +- Can be nested to any depth +- Useful for simple hierarchies without reusable blueprints + +**Allowed keys for nested containers**: `nodes`, `attrs`, `disabled`, `risk_groups` ### Bracket Expansion @@ -338,10 +365,10 @@ Create multiple similar groups using bracket notation: ```yaml network: - groups: + nodes: dc[1-3]/rack[a,b]: - node_count: 4 - name_template: "srv-{node_num}" + count: 4 + template: "srv{n}" ``` **Expansion types**: @@ -354,11 +381,11 @@ Multiple brackets create Cartesian product. **Scope**: Bracket expansion applies to: -- **Group names** under `network.groups` and `blueprints.*.groups` +- **Group names** under `network.nodes` and `blueprints.*.nodes` - **Risk group names** in top-level `risk_groups` definitions (including children) - **Risk group membership arrays** on nodes, links, and groups -It does NOT apply to: component names, direct node names (`network.nodes`), or other string fields. +It does NOT apply to: component names, direct node names without `count`, or other string fields. **Risk group expansion examples**: @@ -376,12 +403,12 @@ network: ### Path Patterns -Path patterns in selectors and overrides are **regex patterns** matched against node names using `re.match()` (anchored at start). +Path patterns in selectors and rules are **regex patterns** matched against node names using `re.match()` (anchored at start). **Key behaviors**: - Paths are matched from the **start** of the node name (no implicit `.*` prefix) -- Node names are hierarchical: `group/subgroup/node-1` +- Node names are hierarchical: `group/subgroup/node1` - Leading `/` is stripped before matching (has no functional effect) - All paths are relative to the current scope @@ -389,28 +416,27 @@ Path patterns in selectors and overrides are **regex patterns** matched against | Pattern | Matches | Does NOT Match | |---------|---------|----------------| -| `leaf` | `leaf/leaf-1`, `leaf/leaf-2` | `pod1/leaf/leaf-1` | -| `pod1/leaf` | `pod1/leaf/leaf-1` | `pod2/leaf/leaf-1` | -| `.*leaf` | `leaf/leaf-1`, `pod1/leaf/leaf-1` | (matches any path containing "leaf") | -| `pod[12]/leaf` | `pod1/leaf/leaf-1`, `pod2/leaf/leaf-1` | `pod3/leaf/leaf-1` | +| `leaf` | `leaf/leaf1`, `leaf/leaf2` | `pod1/leaf/leaf1` | +| `pod1/leaf` | `pod1/leaf/leaf1` | `pod2/leaf/leaf1` | +| `.*leaf` | `leaf/leaf1`, `pod1/leaf/leaf1` | (matches any path containing "leaf") | +| `pod[12]/leaf` | `pod1/leaf/leaf1`, `pod2/leaf/leaf1` | `pod3/leaf/leaf1` | **Path scoping**: -- **At top-level** (`network.adjacency`): Parent path is empty, so patterns match against full node names. `/leaf` and `leaf` are equivalent. +- **At top-level** (`network.links`): Parent path is empty, so patterns match against full node names. 
`/leaf` and `leaf` are equivalent. - **In blueprints**: Paths are relative to instantiation path. If `pod1` uses a blueprint with `source: /leaf`, the pattern becomes `pod1/leaf`. -### Adjacency Rules +### Link Rules (with patterns) ```yaml network: - adjacency: + links: - source: /leaf target: /spine pattern: mesh - link_params: - capacity: 100 - cost: 1 - link_count: 1 + capacity: 100 + cost: 1 + count: 1 ``` **Patterns**: @@ -418,34 +444,33 @@ network: - `mesh`: Full connectivity (every source to every target) - `one_to_one`: Pairwise with modulo wrap. Sizes must have multiple factor (4-to-2 OK, 3-to-2 ERROR) -### Adjacency Selectors +### Link Selectors Filter nodes using attribute conditions: ```yaml network: - adjacency: + links: - source: path: "/datacenter" match: logic: and # "and" or "or" (default varies by context) conditions: - attr: role - operator: "==" + op: "==" value: leaf - attr: tier - operator: ">=" + op: ">=" value: 2 target: path: "/datacenter" match: conditions: - attr: role - operator: "==" + op: "==" value: spine pattern: mesh - link_params: - capacity: 100 + capacity: 100 ``` **Condition operators**: @@ -459,25 +484,25 @@ network: | `not_contains` | String/list does not contain | | `in` | Value in list | | `not_in` | Value not in list | -| `any_value` | Attribute exists and is not None | -| `no_value` | Attribute missing or None | +| `exists` | Attribute exists and is not None | +| `not_exists` | Attribute missing or None | -### Variable Expansion in Adjacency +### Variable Expansion in Links -Use `$var` or `${var}` syntax in adjacency `source`/`target` fields: +Use `$var` or `${var}` syntax in link `source`/`target` fields: ```yaml network: - adjacency: + links: - source: "plane${p}/rack" target: "spine${s}" - expand_vars: - p: [1, 2] - s: [1, 2, 3] - expansion_mode: cartesian + expand: + vars: + p: [1, 2] + s: [1, 2, 3] + mode: cartesian pattern: mesh - link_params: - capacity: 100 + capacity: 100 ``` **Expansion modes**: @@ -494,42 +519,41 @@ Reusable topology templates: ```yaml blueprints: clos_pod: - groups: + nodes: leaf: - node_count: 4 - name_template: "leaf-{node_num}" + count: 4 + template: "leaf{n}" attrs: role: leaf spine: - node_count: 2 - name_template: "spine-{node_num}" + count: 2 + template: "spine{n}" attrs: role: spine - adjacency: + links: - source: /leaf target: /spine pattern: mesh - link_params: - capacity: 100 - cost: 1 + capacity: 100 + cost: 1 ``` ### Blueprint Usage ```yaml network: - groups: + nodes: pod1: - use_blueprint: clos_pod + blueprint: clos_pod attrs: # Merged into all subgroup nodes location: datacenter_east - parameters: # Override blueprint defaults - leaf.node_count: 6 - spine.name_template: "core-{node_num}" + params: # Override blueprint defaults + leaf.count: 6 + spine.template: "core{n}" leaf.attrs.priority: high ``` -Creates: `pod1/leaf/leaf-1`, `pod1/spine/spine-1`, etc. +Creates: `pod1/leaf/leaf1`, `pod1/spine/spine1`, etc. **Parameter override syntax**: `.` or `.attrs.` @@ -540,7 +564,7 @@ All paths are relative to the current scope. In blueprints, paths resolve relati ```yaml blueprints: my_bp: - adjacency: + links: - source: /leaf # Becomes pod1/leaf when instantiated as pod1 target: spine # Also becomes pod1/spine (leading / is optional) pattern: mesh @@ -548,42 +572,102 @@ blueprints: **Note**: Leading `/` is stripped and has no functional effect. Both `/leaf` and `leaf` produce the same result. The `/` serves as a visual convention indicating "from scope root". 
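+
+To make the scoping concrete, here is a minimal sketch (names are hypothetical). Blueprint `params` overrides use dotted paths, as in the usage example above (`leaf.count`, `spine.template`, `leaf.attrs.priority`), and link paths inside a blueprint resolve independently for each instantiation:
+
+```yaml
+blueprints:
+  pod:
+    nodes:
+      leaf:
+        count: 2
+        template: "leaf{n}"
+      spine:
+        count: 2
+        template: "spine{n}"
+    links:
+      - source: /leaf    # Resolves to <instance>/leaf at build time
+        target: /spine
+        pattern: mesh
+        capacity: 100
+
+network:
+  nodes:
+    pod1:
+      blueprint: pod
+    pod2:
+      blueprint: pod
+      params:
+        leaf.count: 4    # pod2 gets 4 leaves; pod1 keeps the default 2
+```
+
+`pod1`'s link rule connects only `pod1/leaf/*` to `pod1/spine/*`, and `pod2`'s only `pod2/leaf/*` to `pod2/spine/*`; instantiations never cross-connect.
+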
-## Node and Link Overrides +## Node and Link Rules Modify nodes/links after initial creation: ```yaml network: - node_overrides: + node_rules: - path: "^pod1/spine/.*$" # Regex pattern disabled: true risk_groups: ["Maintenance"] attrs: maintenance_mode: active - link_overrides: + link_rules: - source: "^pod1/leaf/.*$" target: "^pod1/spine/.*$" - any_direction: true # Match both directions (default: true) - link_params: - capacity: 200 - attrs: - upgraded: true + bidirectional: true # Match both directions (default: true) + capacity: 200 + attrs: + upgraded: true +``` + +### Node Rule Fields + +- `path`: Regex pattern for matching node names (default: `".*"`) +- `match`: Optional attribute conditions to filter nodes (see below) +- `expand`: Optional variable expansion (see [Variable Expansion](#variable-expansion-in-rules)) +- `disabled`, `risk_groups`, `attrs`: Properties to set on matching nodes + +**Node rule with match conditions:** + +```yaml +node_rules: + - path: ".*" + match: + logic: and # "and" or "or" (default: "or") + conditions: + - {attr: role, op: "==", value: compute} + - {attr: tier, op: ">=", value: 2} + disabled: true ``` -**Link override fields**: +### Link Rule Fields - `source`, `target`: Regex patterns for matching link endpoints -- `any_direction`: If `true` (default), matches both A→B and B→A directions -- `link_params`: Parameters to override (`capacity`, `cost`, `disabled`, `risk_groups`, `attrs`) +- `bidirectional`: If `true` (default), matches both A→B and B→A directions +- `link_match`: Optional conditions to filter by the link's own attributes +- `expand`: Optional variable expansion (see [Variable Expansion](#variable-expansion-in-rules)) +- Direct properties: `capacity`, `cost`, `disabled`, `risk_groups`, `attrs` + +**Link rule with link_match:** + +```yaml +link_rules: + - source: "^pod1/.*$" + target: "^pod2/.*$" + link_match: + logic: and + conditions: + - {attr: capacity, op: ">=", value: 400} + - {attr: type, op: "==", value: fiber} + cost: 99 # Only high-capacity fiber links updated +``` + +### Variable Expansion in Rules + +Use `expand` to apply a rule across multiple patterns: + +```yaml +node_rules: + - path: "${dc}_srv1" + expand: + vars: + dc: [dc1, dc2, dc3] + mode: cartesian + attrs: + tagged: true + +link_rules: + - source: "${src}_srv" + target: "${tgt}_srv" + expand: + vars: + src: [dc1, dc2] + tgt: [dc2, dc3] + mode: zip # Pairs by index: dc1->dc2, dc2->dc3 + capacity: 200 +``` **Processing order**: 1. Groups and direct nodes created -2. **Node overrides applied** -3. Adjacency and blueprint adjacencies expanded +2. **Node rules applied** +3. Blueprint links and network links expanded 4. Direct links created -5. **Link overrides applied** +5. 
**Link rules applied** ## Components Library @@ -638,7 +722,7 @@ components: ```yaml network: nodes: - spine-1: + spine1: attrs: hardware: component: "SpineRouter" @@ -678,31 +762,31 @@ Dynamically assign entities to risk groups based on attribute conditions: risk_groups: - name: HighCapacityLinks membership: - entity_scope: link # node, link, or risk_group + scope: link # Required: node, link, or risk_group match: - logic: and # "and" or "or" (default: "and") + logic: and # "and" or "or" (default: "and") conditions: - attr: capacity - operator: ">=" + op: ">=" value: 1000 - name: CoreNodes membership: - entity_scope: node + scope: node match: logic: and conditions: - attr: role - operator: "==" + op: "==" value: core - attr: tier - operator: ">=" + op: ">=" value: 2 ``` **Key points:** -- `entity_scope`: Type of entities to match (`node`, `link`, or `risk_group`) +- `scope`: Type of entities to match (`node`, `link`, or `risk_group`) - `match.logic`: Defaults to `"and"` (stricter than other contexts) - `match.conditions`: Uses same operators as selectors - Entities are added to risk group during network build @@ -715,26 +799,47 @@ Automatically create risk groups from unique attribute values: ```yaml risk_groups: - generate: - entity_scope: link # node or link (not risk_group) + scope: link # Required: node or link (not risk_group) group_by: connection_type # Attribute to group by (supports dot-notation) - name_template: "LinkType_${value}" - attrs: # Optional: static attrs for generated groups + name: "LinkType_${value}" + attrs: # Optional: static attrs for generated groups generated: true - generate: - entity_scope: node + scope: node group_by: region - name_template: "Region_${value}" + name: "Region_${value}" +``` + +**Generate block fields:** + +| Field | Required | Description | +|-------|----------|-------------| +| `scope` | Yes | `node` or `link` (cannot generate from risk_groups) | +| `group_by` | Yes | Attribute name (supports dot-notation) | +| `name` | Yes | Template with `${value}` placeholder | +| `path` | No | Regex to filter entities before grouping | +| `attrs` | No | Static attributes for generated groups | + +**Using path to filter entities:** + +```yaml +risk_groups: + - generate: + scope: node + path: "^prod_.*" # Only production nodes + group_by: env + name: "Env_${value}" ``` +This creates risk groups only from nodes matching the path pattern. For example, if you have `prod_srv1`, `prod_srv2` (env: production), and `dev_srv1` (env: development), only `Env_production` is created because `dev_srv1` doesn't match `^prod_.*`. 
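+
+As a concrete sketch, node definitions (hypothetical names) that would produce exactly that result:
+
+```yaml
+network:
+  nodes:
+    prod_srv1:
+      attrs: {env: production}
+    prod_srv2:
+      attrs: {env: production}
+    dev_srv1:
+      attrs: {env: development}  # Excluded by the path filter; no Env_development group
+```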
+ **Key points:** -- `entity_scope`: `node` or `link` only (cannot generate from risk_groups) -- `group_by`: Attribute name (supports dot-notation) -- `name_template`: Use `${value}` as placeholder for attribute value - Creates one risk group per unique attribute value - Entities with null/missing attribute are skipped - Generated groups are created during network build +- Use `path` to narrow scope before grouping ### Validation @@ -768,16 +873,16 @@ risk_groups: ## Traffic Demands ```yaml -traffic_matrix_set: +demands: production: - source: "^dc1/.*" - sink: "^dc2/.*" - demand: 1000 + target: "^dc2/.*" + volume: 1000 demand_placed: 0.0 # Optional: pre-placed portion mode: combine group_mode: flatten # How to handle grouped nodes priority: 1 - flow_policy_config: SHORTEST_PATHS_ECMP + flow_policy: SHORTEST_PATHS_ECMP attrs: service: web @@ -786,11 +891,11 @@ traffic_matrix_set: match: conditions: - attr: role - operator: "==" + op: "==" value: leaf - sink: + target: group_by: metro - demand: 500 + volume: 500 mode: pairwise priority: 2 ``` @@ -799,8 +904,8 @@ traffic_matrix_set: | Mode | Description | |------|-------------| -| `combine` | Single aggregate flow between source/sink groups via pseudo nodes | -| `pairwise` | Individual flows between all source-sink node pairs | +| `combine` | Single aggregate flow between source/target groups via pseudo nodes | +| `pairwise` | Individual flows between all source-target node pairs | ### Group Modes @@ -808,13 +913,17 @@ When selectors use `group_by`, `group_mode` controls how grouped nodes produce d | Group Mode | Description | |------------|-------------| -| `flatten` | Flatten all groups into single source/sink sets (default) | +| `flatten` | Flatten all groups into single source/target sets (default) | | `per_group` | Create separate demands for each group | | `group_pairwise` | Create pairwise demands between groups | ### Flow Policies -| Policy | Description | +Flow policies can be specified as preset strings or inline configuration objects. + +**Preset strings:** + +| Preset | Description | |--------|-------------| | `SHORTEST_PATHS_ECMP` | IP/IGP routing with equal-split ECMP | | `SHORTEST_PATHS_WCMP` | IP/IGP routing with weighted ECMP (by capacity) | @@ -822,18 +931,41 @@ When selectors use `group_by`, `group_mode` controls how grouped nodes produce d | `TE_ECMP_16_LSP` | MPLS-TE with 16 ECMP LSPs per demand | | `TE_ECMP_UP_TO_256_LSP` | MPLS-TE with up to 256 ECMP LSPs | +**Inline configuration objects:** + +For advanced scenarios, you can specify a custom flow policy as an inline object: + +```yaml +demands: + custom: + - source: A + target: B + volume: 100 + flow_policy: + path_alg: SPF + flow_placement: PROPORTIONAL +``` + +Inline objects are preserved and passed to the analysis engine. The supported fields depend on the underlying NetGraph-Core FlowPolicyConfig. Common fields include: + +- `path_alg`: Path algorithm (`SPF`, etc.) +- `flow_placement`: Flow distribution strategy (`PROPORTIONAL`, `EQUAL_BALANCED`) + +**Note:** Preset strings are recommended for most use cases. Inline objects provide flexibility for specialized routing behaviors but require knowledge of the underlying configuration options. 
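+
+Assuming each demand carries its own `flow_policy` (both forms are shown in this section), the two styles can be mixed within one demand set. A sketch with hypothetical names:
+
+```yaml
+demands:
+  mixed:
+    - source: "^dc1/.*"
+      target: "^dc2/.*"
+      volume: 200
+      flow_policy: SHORTEST_PATHS_ECMP  # Preset string
+    - source: "^dc1/.*"
+      target: "^dc3/.*"
+      volume: 100
+      flow_policy:                      # Inline object
+        path_alg: SPF
+        flow_placement: EQUAL_BALANCED
+```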
+ ### Variable Expansion in Demands ```yaml -traffic_matrix_set: +demands: inter_dc: - source: "^${src_dc}/.*" - sink: "^${dst_dc}/.*" - demand: 100 - expand_vars: - src_dc: [dc1, dc2] - dst_dc: [dc2, dc3] - expansion_mode: cartesian + target: "^${dst_dc}/.*" + volume: 100 + expand: + vars: + src_dc: [dc1, dc2] + dst_dc: [dc2, dc3] + mode: cartesian ``` ## Failure Policies @@ -843,11 +975,11 @@ Failure policies define how nodes, links, and risk groups fail during Monte Carl ### Structure ```yaml -failure_policy_set: +failures: policy_name: attrs: {} # Optional metadata - fail_risk_groups: false # Expand to shared-risk entities - fail_risk_group_children: false # Fail child risk groups recursively + expand_groups: false # Expand to shared-risk entities + expand_children: false # Fail child risk groups recursively modes: # Required: weighted failure modes - weight: 1.0 # Mode selection weight attrs: {} # Optional mode metadata @@ -875,18 +1007,20 @@ modes: ```yaml rules: - - entity_scope: link # Required: node, link, or risk_group - conditions: [] # Optional: filter conditions - logic: or # Condition logic: and | or (default: or) - rule_type: all # Selection: all | choice | random (default: all) - probability: 1.0 # For random: [0.0, 1.0] - count: 1 # For choice: number to select - weight_by: null # For choice: attribute for weighted sampling + - scope: link # Required: node, link, or risk_group + path: "^edge/.*" # Optional regex on entity name/id + match: + conditions: [] # Optional: filter conditions + logic: or # Condition logic: and | or (default: or) + mode: all # Selection: all | choice | random (default: all) + probability: 1.0 # For random: [0.0, 1.0] + count: 1 # For choice: number to select + weight_by: null # For choice: attribute for weighted sampling ``` -### Rule Types +### Rule Modes -| Type | Description | Parameters | +| Mode | Description | Parameters | |------|-------------|------------| | `all` | Select all matching entities | None | | `choice` | Random sample from matches | `count`, optional `weight_by` | @@ -894,31 +1028,31 @@ rules: ### Condition Logic -When multiple conditions are specified: +When multiple `match.conditions` are specified: | Logic | Behavior | |-------|----------| | `or` | Entity matches if **any** condition is true | | `and` | Entity matches if **all** conditions are true | -If no conditions are specified, all entities of the given scope match. +If `match` is omitted or `match.conditions` is empty, all entities of the given scope match. **Context-specific defaults**: | Context | Default `logic` | Rationale | |---------|-----------------|-----------| -| Adjacency `match` | `"or"` | Inclusive: match any condition | +| Link `match` | `"or"` | Inclusive: match any condition | | Demand `match` | `"or"` | Inclusive: match any condition | | Membership rules | `"and"` | Precise: must match all conditions | | Failure rules | `"or"` | Inclusive: match any condition | ### Weighted Sampling (choice mode) -When `weight_by` is set for `rule_type: choice`: +When `weight_by` is set for `mode: choice`: ```yaml -- entity_scope: link - rule_type: choice +- scope: link + mode: choice count: 2 weight_by: capacity # Sample proportional to capacity attribute ``` @@ -930,13 +1064,13 @@ When `weight_by` is set for `rule_type: choice`: ### Risk Group Expansion ```yaml -fail_risk_groups: true +expand_groups: true ``` When enabled, after initial failures are selected, expands to fail all entities that share a risk group with any failed entity (BFS traversal). 
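+
+A sketch of the expansion behavior, assuming hypothetical names and two links that share a conduit:
+
+```yaml
+risk_groups:
+  - name: RG_conduit1
+
+network:
+  links:
+    - source: A
+      target: B
+      risk_groups: ["RG_conduit1"]
+    - source: A
+      target: C
+      risk_groups: ["RG_conduit1"]
+
+failures:
+  conduit_cut:
+    expand_groups: true
+    modes:
+      - weight: 1.0
+        rules:
+          - scope: link
+            mode: choice
+            count: 1
+```
+
+If the A-B link is drawn by the `choice` rule, expansion also fails A-C, because both links carry `RG_conduit1`.
+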
```yaml -fail_risk_group_children: true +expand_children: true ``` When enabled and a risk_group is marked as failed, recursively fails all child risk groups. @@ -944,39 +1078,40 @@ When enabled and a risk_group is marked as failed, recursively fails all child r ### Complete Example ```yaml -failure_policy_set: +failures: weighted_modes: attrs: description: "Balanced failure simulation" - fail_risk_groups: true - fail_risk_group_children: false + expand_groups: true + expand_children: false modes: # 30% chance: fail 1 risk group weighted by distance - weight: 0.3 rules: - - entity_scope: risk_group - rule_type: choice + - scope: risk_group + mode: choice count: 1 weight_by: distance_km # 50% chance: fail 1 non-core node weighted by capacity - weight: 0.5 rules: - - entity_scope: node - rule_type: choice + - scope: node + mode: choice count: 1 - conditions: - - attr: role - operator: "!=" - value: core - logic: and + match: + logic: and + conditions: + - attr: role + op: "!=" + value: core weight_by: attached_capacity_gbps # 20% chance: random link failures with 1% probability each - weight: 0.2 rules: - - entity_scope: link - rule_type: random + - scope: link + mode: random probability: 0.01 ``` @@ -992,36 +1127,36 @@ failure_policy_set: ```yaml workflow: - - step_type: NetworkStats + - type: NetworkStats name: baseline_stats - - step_type: MaximumSupportedDemand + - type: MaximumSupportedDemand name: msd_baseline - matrix_name: production + demand_set: production acceptance_rule: hard alpha_start: 1.0 growth_factor: 2.0 resolution: 0.05 - - step_type: TrafficMatrixPlacement + - type: TrafficMatrixPlacement name: tm_placement - matrix_name: production + demand_set: production failure_policy: weighted_modes iterations: 1000 parallelism: 8 alpha_from_step: msd_baseline alpha_from_field: data.alpha_star - - step_type: MaxFlow + - type: MaxFlow name: capacity_matrix source: "^(dc[1-3])$" - sink: "^(dc[1-3])$" + target: "^(dc[1-3])$" mode: pairwise failure_policy: single_link iterations: 500 baseline: true - - step_type: CostPower + - type: CostPower name: cost_analysis include_disabled: true aggregation_level: 2 @@ -1041,7 +1176,7 @@ workflow: ### BuildGraph Parameters ```yaml -- step_type: BuildGraph +- type: BuildGraph name: build_graph add_reverse: true # Add reverse edges for bidirectional connectivity ``` @@ -1049,7 +1184,7 @@ workflow: ### NetworkStats Parameters ```yaml -- step_type: NetworkStats +- type: NetworkStats name: stats include_disabled: false # Include disabled nodes/links in stats excluded_nodes: [] # Optional: temporary node exclusions @@ -1061,10 +1196,10 @@ workflow: Baseline (no failures) is always run first as a reference. The `iterations` parameter specifies how many failure scenarios to run. ```yaml -- step_type: MaxFlow +- type: MaxFlow name: capacity_analysis source: "^servers/.*" - sink: "^storage/.*" + target: "^storage/.*" mode: combine # combine | pairwise failure_policy: policy_name iterations: 1000 @@ -1083,9 +1218,9 @@ Baseline (no failures) is always run first as a reference. The `iterations` para Baseline (no failures) is always run first as a reference. The `iterations` parameter specifies how many failure scenarios to run. ```yaml -- step_type: TrafficMatrixPlacement +- type: TrafficMatrixPlacement name: tm_placement - matrix_name: default + demand_set: default failure_policy: policy_name iterations: 100 parallelism: auto @@ -1104,9 +1239,9 @@ Baseline (no failures) is always run first as a reference. 
The `iterations` para ### MaximumSupportedDemand Parameters ```yaml -- step_type: MaximumSupportedDemand +- type: MaximumSupportedDemand name: msd - matrix_name: default + demand_set: default acceptance_rule: hard # Currently only "hard" supported alpha_start: 1.0 # Starting alpha for search growth_factor: 2.0 # Growth factor for bracketing (> 1.0) @@ -1122,7 +1257,7 @@ Baseline (no failures) is always run first as a reference. The `iterations` para ### CostPower Parameters ```yaml -- step_type: CostPower +- type: CostPower name: cost_power include_disabled: false # Include disabled nodes/links aggregation_level: 2 # Hierarchy level for aggregation (split by /) @@ -1130,13 +1265,13 @@ Baseline (no failures) is always run first as a reference. The `iterations` para ## Selector Reference -Selectors work across adjacency, demands, and workflows. +Selectors work across links, demands, and workflows. ### Selection Patterns The DSL uses two distinct selection patterns: -**Path-based Node Selection** (adjacency, demands, workflows): +**Path-based Node Selection** (links, demands, workflows): - Works on node entities - Uses regex patterns on hierarchical node names (`path`) @@ -1147,7 +1282,7 @@ The DSL uses two distinct selection patterns: **Condition-based Entity Selection** (failure rules, membership rules): -- Works on nodes, links, or risk_groups (`entity_scope`) +- Works on nodes, links, or risk_groups (`scope`) - Uses only attribute-based filtering (`conditions`) - No path/regex patterns (operates on all entities of specified type) - See Failure Policies section for details @@ -1170,7 +1305,7 @@ source: logic: and conditions: - attr: role - operator: "==" + op: "==" value: spine active_only: true # Exclude disabled nodes ``` @@ -1183,8 +1318,8 @@ The `active_only` field has context-dependent defaults: | Context | Default | Rationale | |---------|---------|-----------| -| `adjacency` | `false` | Links to disabled nodes are created | -| `override` | `false` | Overrides can target disabled nodes | +| `links` | `false` | Links to disabled nodes are created | +| `node_rules` | `false` | Rules can target disabled nodes | | `demand` | `true` | Traffic only between active nodes | | `workflow` | `true` | Analysis uses active nodes only | @@ -1213,8 +1348,8 @@ vars: network: nodes: - spine-1: {attrs: {<<: *attrs, <<: *spine_cfg, capacity: *cap}} - spine-2: {attrs: {<<: *attrs, <<: *spine_cfg, capacity: *cap, region: "dc2"}} + spine1: {attrs: {<<: *attrs, <<: *spine_cfg, capacity: *cap}} + spine2: {attrs: {<<: *attrs, <<: *spine_cfg, capacity: *cap, region: "dc2"}} ``` Anchors are resolved during YAML parsing, before schema validation. diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index cc7a891..1cf412e 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,7 +11,7 @@ repos: hooks: - id: pyright args: [--project, pyproject.toml] - additional_dependencies: ['networkx', 'pyyaml', 'pandas', 'pandas-stubs'] + additional_dependencies: ['networkx', 'pyyaml', 'pandas', 'pandas-stubs', 'netgraph-core'] - repo: https://github.com/pre-commit/pre-commit-hooks rev: v5.0.0 diff --git a/CHANGELOG.md b/CHANGELOG.md index 2560975..0c3f5f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+## [0.17.0] - 2026-01-10 + +### Changed + +- **BREAKING**: DSL syntax refinement with renamed fields and restructured expansion blocks; see updated [DSL reference](docs/reference/dsl.md) + ## [0.16.0] - 2025-12-21 ### Changed diff --git a/LICENSE b/LICENSE index 226688b..be3f7b2 100644 --- a/LICENSE +++ b/LICENSE @@ -1,35 +1,35 @@ -GNU AFFERO GENERAL PUBLIC LICENSE -Version 3, 19 November 2007 + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 -Copyright (C) 2007 Free Software Foundation, Inc. -Everyone is permitted to copy and distribute verbatim copies -of this license document, but changing it is not allowed. + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. -Preamble + Preamble -The GNU Affero General Public License is a free, copyleft license for + The GNU Affero General Public License is a free, copyleft license for software and other kinds of works, specifically designed to ensure cooperation with the community in the case of network server software. -The licenses for most software and other practical works are designed + The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, our General Public Licenses are intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. -When we speak of free software, we are referring to freedom, not + When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. -Developers that use our General Public Licenses protect your rights + Developers that use our General Public Licenses protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License which gives you legal permission to copy, distribute and/or modify the software. -A secondary benefit of defending all users' freedom is that + A secondary benefit of defending all users' freedom is that improvements made in alternate versions of the program, if they receive widespread use, become available for other developers to incorporate. Many developers of free software are heartened and @@ -39,7 +39,7 @@ The GNU General Public License permits making a modified version and letting the public access it on a server without ever releasing its source code to the public. -The GNU Affero General Public License is designed specifically to + The GNU Affero General Public License is designed specifically to ensure that, in such cases, the modified source code becomes available to the community. It requires the operator of a network server to provide the source code of the modified version running there to the @@ -47,48 +47,48 @@ users of that server. Therefore, public use of a modified version, on a publicly accessible server, gives the public access to the source code of the modified version. -An older license, called the Affero General Public License and + An older license, called the Affero General Public License and published by Affero, was designed to accomplish similar goals. 
This is a different license, not a version of the Affero GPL, but Affero has released a new version of the Affero GPL which permits relicensing under this license. -The precise terms and conditions for copying, distribution and + The precise terms and conditions for copying, distribution and modification follow. -TERMS AND CONDITIONS + TERMS AND CONDITIONS -0. Definitions. + 0. Definitions. -"This License" refers to version 3 of the GNU Affero General Public License. + "This License" refers to version 3 of the GNU Affero General Public License. -"Copyright" also means copyright-like laws that apply to other kinds of + "Copyright" also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. -"The Program" refers to any copyrightable work licensed under this + "The Program" refers to any copyrightable work licensed under this License. Each licensee is addressed as "you". "Licensees" and "recipients" may be individuals or organizations. -To "modify" a work means to copy from or adapt all or part of the work + To "modify" a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a "modified version" of the earlier work or a work "based on" the earlier work. -A "covered work" means either the unmodified Program or a work based + A "covered work" means either the unmodified Program or a work based on the Program. -To "propagate" a work means to do anything with it that, without + To "propagate" a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. -To "convey" a work means any kind of propagation that enables other + To "convey" a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. -An interactive user interface displays "Appropriate Legal Notices" + An interactive user interface displays "Appropriate Legal Notices" to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the @@ -97,21 +97,534 @@ work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. -1. Source Code. + 1. Source Code. -The "source code" for a work means the preferred form of the work + The "source code" for a work means the preferred form of the work for making modifications to it. "Object code" means any non-source form of a work. -... (AGPLv3 full text continues; include the entire standard text without modification) ... - -How to Apply These Terms to Your New Programs - -If you develop a new program, and you want it to be of the greatest + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. 
+ + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. 
+ + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. -To do so, attach the following notices to the program. It is safest + To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively state the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. @@ -134,7 +647,7 @@ the "copyright" line and a pointer to where the full notice is found. Also add information on how to contact you by electronic and paper mail. -If your software can interact with users remotely through a computer + If your software can interact with users remotely through a computer network, you should also make sure that it provides a way for users to get its source. For example, if your program is a web application, its interface could display a "Source" link that leads users to an archive @@ -142,7 +655,7 @@ of the code. There are many ways you could offer source, and different solutions will be better for different programs; see section 13 for the specific requirements. -You should also get your employer (if you work as a programmer) or school, + You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU AGPL, see . diff --git a/README.md b/README.md index fe14050..61bd3bc 100644 --- a/README.md +++ b/README.md @@ -2,80 +2,24 @@ [![Python-test](https://github.com/networmix/NetGraph/actions/workflows/python-test.yml/badge.svg?branch=main)](https://github.com/networmix/NetGraph/actions/workflows/python-test.yml) -Scenario-driven network modeling and analysis framework combining Python's flexibility with high-performance C++ algorithms. +Network modeling and analysis framework combining Python with high-performance C++ graph algorithms. -## Overview +## What It Does -NetGraph enables declarative modeling of network topologies, traffic matrices, and failure scenarios. It delegates computationally intensive graph algorithms to [NetGraph-Core](https://github.com/networmix/NetGraph-Core) while providing a rich Python API and CLI for orchestration. 
+NetGraph lets you model network topologies, traffic demands, and failure scenarios - then analyze capacity and resilience. Define networks in Python or declarative YAML, run max-flow and failure simulations, and export reproducible JSON results. Compute-intensive algorithms run in C++ with the GIL released. -## Architecture - -NetGraph employs a **hybrid Python+C++ architecture**: - -- **Python layer (NetGraph)**: Scenario DSL parsing, workflow orchestration, result aggregation, and high-level APIs. -- **C++ layer (NetGraph-Core)**: Performance-critical graph algorithms (SPF, KSP, Max-Flow) executing in optimized C++ with the GIL released. - -## Key Features - -### 1. Modeling & DSL - -- **Declarative Scenarios**: Define topology, traffic, and workflows in validated YAML. -- **Blueprints**: Reusable topology templates (e.g., Clos fabrics, regions) with parameterized expansion. -- **Strict Multigraph**: Deterministic graph representation with stable edge IDs. - -### 2. Failure Analysis - -- **Policy Engine**: Weighted failure modes with multiple policy rules per mode. -- **Non-Destructive**: Runtime exclusions simulate failures without modifying the base topology. -- **Risk Groups**: Model shared fate (e.g., fiber cuts, power zones). - -### 3. Traffic Engineering - -- **Routing Modes**: Unified modeling of **IP Routing** (static costs, oblivious to congestion) and **Traffic Engineering** (dynamic residuals, congestion-aware). -- **Flow Placement**: Strategies for **ECMP** (Equal-Cost Multi-Path) and **WCMP** (Weighted Cost Multi-Path). -- **Capacity Analysis**: Compute max-flow envelopes and demand allocation with configurable placement policies. - -### 4. Workflow & Integration - -- **Structured Results**: Export analysis artifacts to JSON for downstream processing. -- **CLI**: Comprehensive command-line interface for validation and execution. -- **Python API**: Full programmatic access to all modeling and solving capabilities. - -## Installation - -### From PyPI +## Install ```bash pip install ngraph ``` -### From Source - -```bash -git clone https://github.com/networmix/NetGraph -cd NetGraph -make dev # Install in editable mode with dev dependencies -make check # Run full test suite -``` - -## Quick Start - -### CLI Usage - -```bash -# Validate and inspect a scenario -ngraph inspect scenarios/backbone_clos.yml --detail - -# Run analysis workflow -ngraph run scenarios/backbone_clos.yml --results clos.results.json -``` - -### Python API +## Python API ```python from ngraph import Network, Node, Link, analyze, Mode -# Build network programmatically +# Build a simple network network = Network() network.add_node(Node("A")) network.add_node(Node("B")) @@ -83,19 +27,14 @@ network.add_node(Node("C")) network.add_link(Link("A", "B", capacity=10.0, cost=1.0)) network.add_link(Link("B", "C", capacity=10.0, cost=1.0)) -# Compute max flow with the analyze() API -flow = analyze(network).max_flow("^A$", "^C$", mode=Mode.COMBINE) -print(f"Max flow: {flow}") # {('^A$', '^C$'): 10.0} - -# Efficient repeated analysis with bound context -ctx = analyze(network, source="^A$", sink="^C$", mode=Mode.COMBINE) -baseline = ctx.max_flow() -degraded = ctx.max_flow(excluded_nodes={"B"}) # Test failure scenario +# Compute max flow +result = analyze(network).max_flow("^A$", "^C$", mode=Mode.COMBINE) +print(result) # {('^A$', '^C$'): 10.0} ``` -## Example Scenario +## Scenario DSL -NetGraph scenarios define topology, configuration, and analysis steps in a unified YAML file. 
This example demonstrates **blueprints** for modular topology definition: +For reproducible analysis workflows, define topology, traffic, demands, and failure policies in YAML: ```yaml seed: 42 @@ -103,89 +42,101 @@ seed: 42 # Define reusable topology templates blueprints: Clos_Fabric: - groups: - spine: {node_count: 2, name_template: "spine{node_num}"} - leaf: {node_count: 4, name_template: "leaf{node_num}"} - adjacency: - - source: /leaf - target: /spine - pattern: mesh - link_params: {capacity: 100, cost: 1} - - source: /spine - target: /leaf - pattern: mesh - link_params: {capacity: 100, cost: 1} + nodes: + spine: { count: 2, template: "spine{n}" } + leaf: { count: 4, template: "leaf{n}" } + links: + - source: /leaf + target: /spine + pattern: mesh + capacity: 100 + cost: 1 + - source: /spine + target: /leaf + pattern: mesh + capacity: 100 + cost: 1 # Instantiate network from templates network: - groups: - site1: {use_blueprint: Clos_Fabric} - site2: {use_blueprint: Clos_Fabric} - adjacency: - - source: {path: site1/spine} - target: {path: site2/spine} - pattern: one_to_one - link_params: {capacity: 50, cost: 10} - -# Define traffic matrix -traffic_matrix_set: + nodes: + site1: { blueprint: Clos_Fabric } + site2: { blueprint: Clos_Fabric } + links: + - source: { path: site1/spine } + target: { path: site2/spine } + pattern: one_to_one + capacity: 50 + cost: 10 + +# Define failure policy for Monte Carlo analysis +failures: + random_link: + modes: + - weight: 1.0 + rules: + - scope: link + mode: choice + count: 1 + +# Define traffic demands +demands: global_traffic: - source: ^site1/leaf/ - sink: ^site2/leaf/ - demand: 100.0 + target: ^site2/leaf/ + volume: 100.0 mode: combine - flow_policy_config: SHORTEST_PATHS_ECMP + flow_policy: SHORTEST_PATHS_ECMP -# Define analysis workflow +# Analysis workflow: find max capacity, then test under failures workflow: -- step_type: NetworkStats - name: stats -- step_type: MaxFlow - name: site_capacity - source: ^site1/leaf/ - sink: ^site2/leaf/ - mode: combine - shortest_path: false -- step_type: MaximumSupportedDemand - name: max_demand - matrix_name: global_traffic + - type: NetworkStats + name: stats + - type: MaxFlow + name: site_capacity + source: ^site1/leaf/ + target: ^site2/leaf/ + mode: combine + - type: MaximumSupportedDemand + name: max_demand + demand_set: global_traffic + - type: TrafficMatrixPlacement + name: placement_at_max + demand_set: global_traffic + alpha_from_step: max_demand # Use alpha_star from MSD step + failure_policy: random_link + iterations: 100 ``` -## Repository Structure - -```text -ngraph/ # Python package source - dsl/ # Scenario parsing and blueprint expansion - model/ # Network and flow domain models - solver/ # Algorithms and Core wrappers - workflow/ # Analysis steps and orchestration -scenarios/ # Example scenario definitions -tests/ # Pytest suite (unit and integration) -docs/ # Documentation source (MkDocs) -dev/ # Development tools and scripts +```bash +ngraph run scenario.yml --output results/ ``` -## Development +This scenario builds a dual-site Clos fabric from blueprints, finds the maximum supportable demand, then runs 100 Monte Carlo iterations with random link failures - exporting results to JSON. 
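+The exported JSON can be post-processed with plain Python. A minimal sketch, assuming the run above writes `results/scenario.results.json` (the exact file name under `--output` may differ) and using the step names from the scenario (`max_demand`, `site_capacity`):
+
+```python
+import json
+
+# Results have top-level "workflow", "steps", and "scenario" sections;
+# each workflow step stores its outputs under steps.<name>.data.
+with open("results/scenario.results.json") as f:
+    results = json.load(f)
+
+# MaximumSupportedDemand stores the search result as data.alpha_star.
+print(results["steps"]["max_demand"]["data"]["alpha_star"])
+```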
-```bash -make dev # Setup environment -make check # Run tests and linting -make lint # Run linting only -make test # Run tests only -make docs-serve # Preview documentation -``` +See [DSL Reference](https://networmix.github.io/NetGraph/reference/dsl/) and [Examples](https://networmix.github.io/NetGraph/examples/clos-fabric/) for more. -## Requirements +## Capabilities -- **Python**: 3.9+ -- **NetGraph-Core**: Compatible C++ backend version +- **Declarative scenarios** with schema validation, reusable blueprints, and strict multigraph representation +- **Failure analysis** via policy engine with weighted modes, risk groups, and non-destructive runtime exclusions +- **Routing modes** for IP routing (cost-based) and traffic engineering (capacity-aware) +- **Flow placement** strategies for ECMP and WCMP with max-flow and capacity envelopes +- **Reproducible results** via seeded randomness and stable edge IDs +- **C++ performance** with GIL released via [NetGraph-Core](https://github.com/networmix/NetGraph-Core) ## Documentation -- **Site**: [networmix.github.io/NetGraph](https://networmix.github.io/NetGraph/) -- **Tutorial**: [Getting Started](https://networmix.github.io/NetGraph/getting-started/tutorial/) -- **Reference**: [API](https://networmix.github.io/NetGraph/reference/api/) | [CLI](https://networmix.github.io/NetGraph/reference/cli/) | [DSL](https://networmix.github.io/NetGraph/reference/dsl/) +- [**Tutorial**](https://networmix.github.io/NetGraph/getting-started/tutorial/) - Getting started guide +- [**Examples**](https://networmix.github.io/NetGraph/examples/clos-fabric/) - Clos fabric, failure analysis, and more +- [**DSL Reference**](https://networmix.github.io/NetGraph/reference/dsl/) - YAML scenario syntax +- [**API Reference**](https://networmix.github.io/NetGraph/reference/api/) - Python API docs ## License [GNU Affero General Public License v3.0 or later](LICENSE) + +## Requirements + +- Python 3.11+ +- NetGraph-Core (installed automatically) diff --git a/dev/generate_api_docs.py b/dev/generate_api_docs.py index 434026e..36d84b0 100755 --- a/dev/generate_api_docs.py +++ b/dev/generate_api_docs.py @@ -199,7 +199,7 @@ def discover_modules(): # Sort modules in logical order for documentation def module_sort_key(module_name): - """Sort key to organize modules logically after refactor.""" + """Sort key to organize modules in logical documentation order.""" parts = module_name.split(".") # Main ngraph modules first (ngraph.xxx) if len(parts) == 2: diff --git a/docs/examples/basic.md b/docs/examples/basic.md index 13fad69..b0ced78 100644 --- a/docs/examples/basic.md +++ b/docs/examples/basic.md @@ -44,38 +44,32 @@ network: # Parallel edges between A->B - source: A target: B - link_params: - capacity: 1 - cost: 1 + capacity: 1 + cost: 1 - source: A target: B - link_params: - capacity: 2 - cost: 1 + capacity: 2 + cost: 1 # Parallel edges between B->C - source: B target: C - link_params: - capacity: 1 - cost: 1 + capacity: 1 + cost: 1 - source: B target: C - link_params: - capacity: 2 - cost: 1 + capacity: 2 + cost: 1 # Alternative path A->D->C - source: A target: D - link_params: - capacity: 3 - cost: 2 + capacity: 3 + cost: 2 - source: D target: C - link_params: - capacity: 3 - cost: 2 + capacity: 3 + cost: 2 """ # Create the network @@ -83,7 +77,7 @@ scenario = Scenario.from_yaml(scenario_yaml) network = scenario.network ``` -Note that here we used a simple `nodes` and `links` structure to directly define the network topology. 
The optional `seed` parameter ensures reproducible results when using randomized workflow steps. In more complex scenarios, you would typically use `groups` and `adjacency` to define groups of nodes and their connections, or even leverage the `blueprints` to create reusable components. This advanced functionality is explained in the [DSL Reference](../reference/dsl.md) and used in the [Clos Fabric Analysis](clos-fabric.md) example. +Note that here we used a simple `nodes` and `links` structure to directly define the network topology. The optional `seed` parameter ensures reproducible results when using randomized workflow steps. In more complex scenarios, you would typically use node groups with `count` and `template` to define groups of nodes and link rules to define their connections, or even leverage the `blueprints` to create reusable components. This advanced functionality is explained in the [DSL Reference](../reference/dsl.md) and used in the [Clos Fabric Analysis](clos-fabric.md) example. ### Flow Analysis Variants @@ -138,7 +132,7 @@ result = analyze(network).max_flow_detailed( ) # Extract flow value and summary -(src_label, sink_label), summary = next(iter(result.items())) +(src_label, target_label), summary = next(iter(result.items())) print(f"Total flow: {summary.total_flow}") print(f"Cost distribution: {summary.cost_distribution}") diff --git a/docs/examples/bundled-scenarios.md b/docs/examples/bundled-scenarios.md index 95e3319..0592554 100644 --- a/docs/examples/bundled-scenarios.md +++ b/docs/examples/bundled-scenarios.md @@ -7,7 +7,7 @@ NetGraph ships with ready-to-run scenarios that demonstrate the DSL, workflow st Inspect first, then run: ```bash -# Inspect (structure, steps, matrices, failure policies) +# Inspect (structure, steps, demands, failure policies) ngraph inspect scenarios/backbone_clos.yml --detail # Run and write JSON results next to the scenario (or under --output) @@ -21,8 +21,8 @@ You can filter output by workflow step names with `--keys` (see each scenario se - **Purpose**: Toy 4-node full mesh to exercise MSD search, TM placement, and pairwise MaxFlow. - **Highlights**: - - Failure policy: single link choice (`failure_policy_set.single_link_failure`) - - Traffic matrix: pairwise demands across all nodes (`baseline_traffic_matrix`) + - Failure policy: single link choice (`failures.single_link_failure`) + - Demand set: pairwise demands across all nodes (`baseline_traffic_matrix`) - Workflow steps: `msd_baseline`, `tm_placement`, `node_to_node_capacity_matrix` Run: @@ -40,9 +40,9 @@ ngraph run scenarios/square_mesh.yaml --keys msd_baseline --stdout - **Purpose**: Small Clos/metro fabric with components, SRLG-like risk groups, and multi-step workflow. - **Highlights**: - - Uses `blueprints`, attribute-based adjacency selectors, and hardware component attrs - - Failure policy: weighted multi-mode (`failure_policy_set.weighted_modes`) - - Traffic matrix: inter-metro DC flows with TE/WCMP policy + - Uses `blueprints`, attribute-based link selectors, and hardware component attrs + - Failure policy: weighted multi-mode (`failures.weighted_modes`) + - Demand set: inter-metro DC flows with TE/WCMP policy - Workflow steps: `network_statistics`, `msd_baseline`, `tm_placement`, `cost_power` Run: @@ -76,4 +76,4 @@ ngraph run scenarios/nsfnet.yaml --keys node_to_node_capacity_matrix_1 --stdout ## Notes on results -All runs emit a consistent JSON shape with `workflow`, `steps`, and `scenario` sections. 
Steps like `MaxFlow` and `TrafficMatrixPlacement` store per-iteration lists under `data.flow_results` with `summary` and optional `cost_distribution` or `min_cut` fields. See Reference → Workflow for the exact schema. +All runs emit a consistent JSON shape with `workflow`, `steps`, and `scenario` sections. Steps like `MaxFlow` and `TrafficMatrixPlacement` store per-iteration lists under `data.flow_results` with `summary` and optional `cost_distribution` or `min_cut` fields. See Reference -> Workflow for the exact schema. diff --git a/docs/examples/clos-fabric.md b/docs/examples/clos-fabric.md index 3ff004a..143d22f 100644 --- a/docs/examples/clos-fabric.md +++ b/docs/examples/clos-fabric.md @@ -9,7 +9,7 @@ Refer to [Tutorial](../getting-started/tutorial.md) for running bundled scenario We'll create two separate 3-tier Clos networks and analyze the maximum flow capacity between them. This scenario showcases: - Hierarchical blueprint composition -- Complex adjacency patterns +- Complex link patterns - Flow analysis with different placement policies ## Programmatic scenario @@ -21,65 +21,61 @@ from ngraph import analyze, Mode, FlowPlacement scenario_yaml = """ blueprints: brick_2tier: - groups: + nodes: t1: - node_count: 8 - name_template: "t1-{node_num}" + count: 8 + template: "t1-{n}" t2: - node_count: 8 - name_template: "t2-{node_num}" + count: 8 + template: "t2-{n}" - adjacency: + links: - source: /t1 target: /t2 pattern: mesh - link_params: - capacity: 2 - cost: 1 + capacity: 2 + cost: 1 3tier_clos: - groups: + nodes: b1: - use_blueprint: brick_2tier + blueprint: brick_2tier b2: - use_blueprint: brick_2tier + blueprint: brick_2tier spine: - node_count: 64 - name_template: "t3-{node_num}" + count: 64 + template: "t3-{n}" - adjacency: + links: - source: b1/t2 target: spine pattern: one_to_one - link_params: - capacity: 2 - cost: 1 + capacity: 2 + cost: 1 - source: b2/t2 target: spine pattern: one_to_one - link_params: - capacity: 2 - cost: 1 + capacity: 2 + cost: 1 network: name: "3tier_clos_network" version: 1.0 - groups: + nodes: my_clos1: - use_blueprint: 3tier_clos + blueprint: 3tier_clos my_clos2: - use_blueprint: 3tier_clos + blueprint: 3tier_clos - adjacency: + links: - source: my_clos1/spine target: my_clos2/spine pattern: one_to_one - link_count: 4 - link_params: - capacity: 1 - cost: 1 + count: 4 + capacity: 1 + cost: 1 """ # Create and analyze the scenario @@ -104,7 +100,7 @@ print(f"Maximum flow with ECMP: {max_flow_ecmp}") The result `{('b1|b2', 'b1|b2'): 256.0}` means: - **Source**: All t1 nodes in both b1 and b2 segments of my_clos1 -- **Sink**: All t1 nodes in both b1 and b2 segments of my_clos2 +- **Target**: All t1 nodes in both b1 and b2 segments of my_clos2 - **Capacity**: Maximum flow of 256.0 units ## ECMP vs WCMP: Impact of Link Failures @@ -133,26 +129,26 @@ from ngraph.scenario import Scenario scenario_yaml = """ blueprints: brick_2tier: - groups: - t1: {node_count: 8, name_template: "t1-{node_num}"} - t2: {node_count: 8, name_template: "t2-{node_num}"} - adjacency: - - {source: /t1, target: /t2, pattern: mesh, link_params: {capacity: 2, cost: 1}} + nodes: + t1: {count: 8, template: "t1-{n}"} + t2: {count: 8, template: "t2-{n}"} + links: + - {source: /t1, target: /t2, pattern: mesh, capacity: 2, cost: 1} 3tier_clos: - groups: - b1: {use_blueprint: brick_2tier} - b2: {use_blueprint: brick_2tier} - spine: {node_count: 64, name_template: "t3-{node_num}"} - adjacency: - - {source: b1/t2, target: spine, pattern: one_to_one, link_params: {capacity: 2, cost: 1}} - - 
{source: b2/t2, target: spine, pattern: one_to_one, link_params: {capacity: 2, cost: 1}} + nodes: + b1: {blueprint: brick_2tier} + b2: {blueprint: brick_2tier} + spine: {count: 64, template: "t3-{n}"} + links: + - {source: b1/t2, target: spine, pattern: one_to_one, capacity: 2, cost: 1} + - {source: b2/t2, target: spine, pattern: one_to_one, capacity: 2, cost: 1} network: name: 3tier_clos_network - groups: - my_clos1: {use_blueprint: 3tier_clos} - my_clos2: {use_blueprint: 3tier_clos} - adjacency: - - {source: my_clos1/spine, target: my_clos2/spine, pattern: one_to_one, link_count: 4, link_params: {capacity: 1, cost: 1}} + nodes: + my_clos1: {blueprint: 3tier_clos} + my_clos2: {blueprint: 3tier_clos} + links: + - {source: my_clos1/spine, target: my_clos2/spine, pattern: one_to_one, count: 4, capacity: 1, cost: 1} """ scenario = Scenario.from_yaml(scenario_yaml) diff --git a/docs/getting-started/tutorial.md b/docs/getting-started/tutorial.md index 5de7a4f..d2568a3 100644 --- a/docs/getting-started/tutorial.md +++ b/docs/getting-started/tutorial.md @@ -5,7 +5,7 @@ This guide shows the fastest way to run a scenario from the CLI and a minimal pr ## CLI: run and inspect ```bash -# Inspect (validate and preview structure, steps, matrices) +# Inspect (validate and preview structure, steps, demands) ngraph inspect scenarios/square_mesh.yaml --detail # Run and store results (JSON) next to the scenario or under --output @@ -28,9 +28,9 @@ network: A: {} B: {} links: - - {source: A, target: B, link_params: {capacity: 10.0, cost: 1.0}} + - {source: A, target: B, capacity: 10.0, cost: 1.0} workflow: - - step_type: NetworkStats + - type: NetworkStats name: baseline_stats """ diff --git a/docs/reference/api-full.md b/docs/reference/api-full.md index ed0eeda..12aef06 100644 --- a/docs/reference/api-full.md +++ b/docs/reference/api-full.md @@ -12,7 +12,7 @@ Quick links: - [CLI Reference](cli.md) - [DSL Reference](dsl.md) -Generated from source code on: December 22, 2025 at 01:21 UTC +Generated from source code on: January 15, 2026 at 14:12 UTC Modules auto-discovered: 53 @@ -261,7 +261,7 @@ Typical usage example: - `network` (Network) - `workflow` (List[WorkflowStep]) - `failure_policy_set` (FailurePolicySet) = FailurePolicySet(policies={}) -- `traffic_matrix_set` (TrafficMatrixSet) = TrafficMatrixSet(matrices={}) +- `demand_set` (DemandSet) = DemandSet(sets={}) - `results` (Results) = Results(_store={}, _metadata={}, _active_step=None, _scenario={}) - `components_library` (ComponentsLibrary) = ComponentsLibrary(components={}) - `seed` (Optional[int]) @@ -412,20 +412,20 @@ Returns: ## ngraph.model.demand.builder -Builders for traffic matrices. +Builders for demand sets. -Construct `TrafficMatrixSet` from raw dictionaries (e.g. parsed YAML). +Construct `DemandSet` from raw dictionaries (e.g. parsed YAML). -### build_traffic_matrix_set(raw: 'Dict[str, List[dict]]') -> 'TrafficMatrixSet' +### build_demand_set(raw: 'Dict[str, List[dict]]') -> 'DemandSet' -Build a `TrafficMatrixSet` from a mapping of name -> list of dicts. +Build a `DemandSet` from a mapping of name -> list of dicts. Args: - raw: Mapping where each key is a matrix name and each value is a list of + raw: Mapping where each key is a demand set name and each value is a list of dictionaries with `TrafficDemand` constructor fields. Returns: - Initialized `TrafficMatrixSet` with constructed `TrafficDemand` objects. + Initialized `DemandSet` with constructed `TrafficDemand` objects. 
Raises: ValueError: If ``raw`` is not a mapping of name -> list[dict], @@ -435,32 +435,32 @@ Raises: ## ngraph.model.demand.matrix -Traffic matrix containers. +Demand set containers. -Provides `TrafficMatrixSet`, a named collection of `TrafficDemand` lists +Provides `DemandSet`, a named collection of `TrafficDemand` lists used as input to demand expansion and placement. This module contains input containers, not analysis results. -### TrafficMatrixSet +### DemandSet Named collection of TrafficDemand lists. -This mutable container maps scenario names to lists of TrafficDemand objects, -allowing management of multiple traffic matrices for analysis. +This mutable container maps set names to lists of TrafficDemand objects, +allowing management of multiple demand sets for analysis. Attributes: - matrices: Dictionary mapping scenario names to TrafficDemand lists. + sets: Dictionary mapping set names to TrafficDemand lists. **Attributes:** -- `matrices` (dict[str, list[TrafficDemand]]) = {} +- `sets` (dict[str, list[TrafficDemand]]) = {} **Methods:** -- `add(self, name: 'str', demands: 'list[TrafficDemand]') -> 'None'` - Add a traffic matrix to the collection. -- `get_all_demands(self) -> 'list[TrafficDemand]'` - Get all traffic demands from all matrices combined. -- `get_default_matrix(self) -> 'list[TrafficDemand]'` - Get default traffic matrix. -- `get_matrix(self, name: 'str') -> 'list[TrafficDemand]'` - Get a specific traffic matrix by name. +- `add(self, name: 'str', demands: 'list[TrafficDemand]') -> 'None'` - Add a demand list to the collection. +- `get_all_demands(self) -> 'list[TrafficDemand]'` - Get all traffic demands from all sets combined. +- `get_default_set(self) -> 'list[TrafficDemand]'` - Get default demand set. +- `get_set(self, name: 'str') -> 'list[TrafficDemand]'` - Get a specific demand set by name. - `to_dict(self) -> 'dict[str, Any]'` - Convert to dictionary for JSON serialization. --- @@ -479,33 +479,29 @@ Traffic demand specification using unified selectors. Attributes: source: Source node selector (string path or selector dict). - sink: Sink node selector (string path or selector dict). - demand: Total demand volume. - demand_placed: Portion of this demand placed so far. + target: Target node selector (string path or selector dict). + volume: Total demand volume. + volume_placed: Portion of this demand placed so far. priority: Priority class (lower = higher priority). mode: Node pairing mode ("combine" or "pairwise"). group_mode: How grouped nodes produce demands ("flatten", "per_group", "group_pairwise"). - expand_vars: Variable substitutions using $var syntax. - expansion_mode: How to combine expand_vars ("cartesian" or "zip"). - flow_policy_config: Policy preset for routing. - flow_policy: Concrete policy instance (overrides flow_policy_config). + flow_policy: Policy preset for routing. + flow_policy_obj: Concrete policy instance (overrides flow_policy). attrs: Arbitrary user metadata. id: Unique identifier. Auto-generated if empty. **Attributes:** - `source` (Union) -- `sink` (Union) -- `demand` (float) = 0.0 -- `demand_placed` (float) = 0.0 +- `target` (Union) +- `volume` (float) = 0.0 +- `volume_placed` (float) = 0.0 - `priority` (int) = 0 - `mode` (str) = combine - `group_mode` (str) = flatten -- `expand_vars` (Dict) = {} -- `expansion_mode` (str) = cartesian -- `flow_policy_config` (Optional) - `flow_policy` (Optional) +- `flow_policy_obj` (Optional) - `attrs` (Dict) = {} - `id` (str) @@ -523,17 +519,19 @@ attribute values from nodes or links. 
Parsed generate block specification. Attributes: - entity_scope: Type of entities to group ("node" or "link"). + scope: Type of entities to group ("node" or "link"). + path: Optional regex pattern to filter entities by name. group_by: Attribute name to group by (supports dot-notation). - name_template: Template for generated group names. Use ${value} + name: Template for generated group names. Use ${value} as placeholder for the attribute value. attrs: Optional static attributes for generated groups. **Attributes:** -- `entity_scope` (Literal['node', 'link']) +- `scope` (Literal['node', 'link']) - `group_by` (str) -- `name_template` (str) +- `name` (str) +- `path` (Optional[str]) - `attrs` (Dict[str, Any]) = {} ### generate_risk_groups(network: "'Network'", spec: 'GenerateSpec') -> 'List[RiskGroup]' @@ -581,13 +579,15 @@ on attribute conditions. Parsed membership rule specification. Attributes: - entity_scope: Type of entities to match ("node", "link", or "risk_group"). + scope: Type of entities to match ("node", "link", or "risk_group"). + path: Optional regex pattern to filter entities by name. match: Match specification with conditions. **Attributes:** -- `entity_scope` (EntityScope) -- `match` (MatchSpec) +- `scope` (EntityScope) +- `path` (Optional[str]) +- `match` (Optional[MatchSpec]) ### resolve_membership_rules(network: "'Network'") -> 'None' @@ -595,11 +595,11 @@ Apply membership rules to populate entity risk_groups sets. For each risk group with a `_membership_raw` specification: -- If entity_scope is "node" or "link": adds the risk group name to each +- If scope is "node" or "link": adds the risk group name to each matched entity's risk_groups set. -- If entity_scope is "risk_group": adds matched risk groups as children +- If scope is "risk_group": adds matched risk groups as children of this risk group (hierarchical membership). @@ -618,7 +618,22 @@ Parsers for FailurePolicySet and related failure modeling structures. ### build_failure_policy(fp_data: 'Dict[str, Any]', *, policy_name: 'str', derive_seed: 'Callable[[str], Optional[int]]') -> 'FailurePolicy' -No documentation available. +Build a FailurePolicy from a raw configuration dictionary. + +Parses modes, rules, and conditions from the policy definition and +constructs a fully initialized FailurePolicy object. + +Args: + fp_data: Policy definition dict with keys: modes (required), attrs, + expand_groups, expand_children. Each mode contains weight and rules. + policy_name: Name identifier for this policy (used for seed derivation). + derive_seed: Callable to derive deterministic seeds from component names. + +Returns: + FailurePolicy: Configured policy with parsed modes and rules. + +Raises: + ValueError: If modes is empty or malformed, or if rules are invalid. ### build_failure_policy_set(raw: 'Dict[str, Any]', *, derive_seed: 'Callable[[str], Optional[int]]') -> 'FailurePolicySet' @@ -660,12 +675,12 @@ Returns: Failure policy primitives. -Defines `FailureCondition`, `FailureRule`, and `FailurePolicy` for expressing -how nodes, links, and risk groups fail in analyses. Conditions match on -top-level attributes with simple operators; rules select matches using -"all", probabilistic "random" (with `probability`), or fixed-size "choice" -(with `count`). Policies can optionally expand failures by shared risk groups -or by risk-group children. +Defines `FailureRule` and `FailurePolicy` for expressing how nodes, links, +and risk groups fail in analyses. 
Conditions match on top-level attributes +with simple operators; rules select matches using "all", probabilistic +"random" (with `probability`), or fixed-size "choice" (with `count`). +Policies can optionally expand failures by shared risk groups or by +risk-group children. ### FailureMode @@ -689,77 +704,30 @@ Attributes: ### FailurePolicy -A container for multiple FailureRules plus optional metadata in `attrs`. +A container for failure modes plus optional metadata in `attrs`. The main entry point is `apply_failures`, which: - 1) For each rule, gather the relevant entities (node, link, or risk_group). - 2) Match them based on rule conditions using 'and' or 'or' logic. - 3) Apply the selection strategy (all, random, or choice). - 4) Collect the union of all failed entities across all rules. - 5) Optionally expand failures by shared-risk groups or sub-risks. - -Example YAML configuration: - ```yaml - failure_policy: - attrs: - description: "Regional power grid failure affecting telecom infrastructure" - fail_risk_groups: true - rules: - # Fail all nodes in Texas electrical grid - - entity_scope: "node" - - conditions: - - attr: "electric_grid" - - operator: "==" - value: "texas" - logic: "and" - rule_type: "all" - - # Randomly fail 40% of underground fiber links in affected region - - entity_scope: "link" - - conditions: - - attr: "region" - - operator: "==" - value: "southwest" - - attr: "installation" - - operator: "==" - value: "underground" - logic: "and" - rule_type: "random" - probability: 0.4 - - # Choose exactly 2 risk groups to fail (e.g., data centers) - # Note: logic defaults to "or" when not specified - - entity_scope: "risk_group" - - rule_type: "choice" - count: 2 - ``` + 1) Select a mode based on weights. + 2) For each rule in the mode, gather relevant entities. + 3) Match based on rule conditions using 'and' or 'or' logic. + 4) Apply the selection strategy (all, random, or choice). + 5) Collect the union of all failed entities across all rules. + 6) Optionally expand failures by shared-risk groups or sub-risks. Attributes: - rules (List[FailureRule]): - A list of FailureRules to apply. - attrs (Dict[str, Any]): - Arbitrary metadata about this policy (e.g. "name", "description"). - fail_risk_groups (bool): - If True, after initial selection, expand failures among any - node/link that shares a risk group with a failed entity. - fail_risk_group_children (bool): - If True, and if a risk_group is marked as failed, expand to - children risk_groups recursively. - seed (Optional[int]): - Seed for reproducible random operations. If None, operations - will be non-deterministic. + attrs: Arbitrary metadata about this policy. + expand_groups: If True, expand failures among entities sharing + risk groups with failed entities. + expand_children: If True, expand failed risk groups to include + their children recursively. + seed: Seed for reproducible random operations. + modes: List of weighted failure modes. **Attributes:** - `attrs` (Dict[str, Any]) = {} -- `fail_risk_groups` (bool) = False -- `fail_risk_group_children` (bool) = False +- `expand_groups` (bool) = False +- `expand_children` (bool) = False - `seed` (Optional[int]) - `modes` (List[FailureMode]) = [] @@ -773,34 +741,31 @@ Attributes: Defines how to match and then select entities for failure. Attributes: - entity_scope (EntityScope): - The type of entities this rule applies to: "node", "link", or "risk_group". - conditions (List[FailureCondition]): - A list of conditions to filter matching entities. 
- logic (Literal["and", "or"]): - "and": All conditions must be true for a match. - "or": At least one condition is true for a match (default). - rule_type (Literal["random", "choice", "all"]): - The selection strategy among the matched set: - -- "random": each matched entity is chosen with probability = `probability`. -- "choice": pick exactly `count` items from the matched set (random sample). -- "all": select every matched entity in the matched set. - - probability (float): - Probability in [0,1], used if `rule_type="random"`. - count (int): - Number of entities to pick if `rule_type="choice"`. + scope: The type of entities this rule applies to: "node", "link", + or "risk_group". + conditions: A list of conditions to filter matching entities. + logic: "and" (all must be true) or "or" (any must be true, default). + mode: The selection strategy among the matched set: + +- "random": each matched entity is chosen with probability. +- "choice": pick exactly `count` items (random sample). +- "all": select every matched entity. + + probability: Probability in [0,1], used if mode="random". + count: Number of entities to pick if mode="choice". + weight_by: Optional attribute for weighted sampling in choice mode. + path: Optional regex pattern to filter entities by name. **Attributes:** -- `entity_scope` (EntityScope) -- `conditions` (List[FailureCondition]) = [] +- `scope` (EntityScope) +- `conditions` (List[Condition]) = [] - `logic` (Literal['and', 'or']) = or -- `rule_type` (Literal['random', 'choice', 'all']) = all +- `mode` (Literal['random', 'choice', 'all']) = all - `probability` (float) = 1.0 - `count` (int) = 1 - `weight_by` (Optional[str]) +- `path` (Optional[str]) --- @@ -850,7 +815,7 @@ Detect circular references in risk group parent-child relationships. Uses DFS-based cycle detection to find any risk group that is part of a cycle in the children hierarchy. This can happen when membership rules -with entity_scope='risk_group' create mutual parent-child relationships. +with scope='risk_group' create mutual parent-child relationships. Args: network: Network with risk_groups populated (after membership resolution). @@ -917,7 +882,7 @@ Example: Serialize a FlowPolicyPreset to its string name for JSON storage. -Handles FlowPolicyPreset enum values, integer enum values, and string fallbacks. +Handles FlowPolicyPreset enum values, integer enum values, and string inputs. Returns None for None input. Args: @@ -1065,16 +1030,10 @@ cost. Cached properties expose derived sequences for nodes and edges, and helpers provide equality, ordering by cost, and sub-path extraction with cost recalculation. -Breaking change from v1.x: Edge references now use EdgeRef (link_id + direction) -instead of integer edge keys for stable scenario-level edge identification. - ### Path Represents a single path in the network. -Breaking change from v1.x: path field now uses EdgeRef (link_id + direction) -instead of integer edge keys for stable scenario-level edge identification. - Attributes: path: Sequence of (node_name, (edge_refs...)) tuples representing the path. The final element typically has an empty tuple of edge refs. @@ -1093,7 +1052,7 @@ Attributes: **Methods:** -- `get_sub_path(self, dst_node: 'str', graph: 'StrictMultiDiGraph | None' = None, cost_attr: 'str' = 'cost') -> 'Path'` - Create a sub-path ending at the specified destination node. +- `get_sub_path(self, dst_node: 'str') -> 'Path'` - Create a sub-path ending at the specified destination node. 
--- @@ -1102,8 +1061,9 @@ Attributes: Base classes for workflow automation. Defines the workflow step abstraction, registration decorator, and execution -wrapper that adds timing and logging. Steps implement `run()` and are executed -via `execute()` which records metadata and re-raises failures. +lifecycle. Steps implement `run()` and are executed via `execute()` which +handles timing, logging, and metadata recording. Failures are logged and +re-raised. ### WorkflowStep @@ -1116,7 +1076,7 @@ Workflow metadata is automatically stored in scenario.results for analysis. YAML Configuration: ```yaml workflow: - - step_type: + - type: name: "optional_step_name" # Optional: Custom name for this step instance seed: 42 # Optional: Seed for reproducible random operations @@ -1174,7 +1134,7 @@ representation for inspection. YAML Configuration Example: ```yaml workflow: - - step_type: BuildGraph + - type: BuildGraph name: "build_network_graph" # Optional: Custom name for this step add_reverse: true # Optional: Add reverse edges (default: true) @@ -1242,7 +1202,7 @@ Disabled handling: YAML Configuration Example: ```yaml workflow: - - step_type: CostPower + - type: CostPower name: "cost_power" # Optional custom name include_disabled: false # Default: only enabled nodes/links @@ -1305,11 +1265,11 @@ YAML Configuration Example: workflow: -- step_type: MaxFlow +- type: MaxFlow name: "maxflow_dc_to_edge" source: "^datacenter/.*" - sink: "^edge/.*" + target: "^edge/.*" mode: "combine" failure_policy: "random_failures" iterations: 100 @@ -1333,7 +1293,7 @@ many iterations matched that pattern. Attributes: source: Source node selector (string path or selector dict). - sink: Sink node selector (string path or selector dict). + target: Target node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). failure_policy: Name of failure policy in scenario.failure_policy_set. iterations: Number of failure iterations to run. @@ -1353,7 +1313,7 @@ Attributes: - `seed` (int | None) - `_seed_source` (str) - `source` (Union[str, Dict[str, Any]]) -- `sink` (Union[str, Dict[str, Any]]) +- `target` (Union[str, Dict[str, Any]]) - `mode` (str) = combine - `failure_policy` (str | None) - `iterations` (int) = 1 @@ -1374,10 +1334,10 @@ Attributes: ## ngraph.workflow.maximum_supported_demand_step -Maximum Supported Demand (MSD) workflow step. +MaximumSupportedDemand workflow step. Searches for the maximum uniform traffic multiplier `alpha_star` that is fully -placeable for a given matrix. Stores results under `data` as: +placeable for a given demand set. Stores results under `data` as: - `alpha_star`: float - `context`: parameters used for the search @@ -1387,16 +1347,45 @@ placeable for a given matrix. Stores results under `data` as: Performance: AnalysisContext is built once at search start and reused across all binary search probes. Only demand volumes change per probe. 
+YAML Configuration Example: + ```yaml + workflow: + - type: MaximumSupportedDemand + + name: "msd_search" + demand_set: "default" + resolution: 0.01 # Convergence threshold + max_bisect_iters: 50 # Maximum bisection iterations + alpha_start: 1.0 # Starting multiplier + growth_factor: 2.0 # Bracket expansion factor + ``` + ### MaximumSupportedDemand -MaximumSupportedDemand(name: 'str' = '', seed: 'Optional[int]' = None, _seed_source: 'str' = '', matrix_name: 'str' = 'default', acceptance_rule: 'str' = 'hard', alpha_start: 'float' = 1.0, growth_factor: 'float' = 2.0, alpha_min: 'float' = 1e-06, alpha_max: 'float' = 1000000000.0, resolution: 'float' = 0.01, max_bracket_iters: 'int' = 32, max_bisect_iters: 'int' = 32, seeds_per_alpha: 'int' = 1, placement_rounds: 'int | str' = 'auto') +Finds the maximum uniform traffic multiplier that is fully placeable. + +Uses binary search to find alpha_star, the maximum multiplier for all +demands in the set that can still be fully placed on the network. + +Attributes: + demand_set: Name of the demand set to analyze. + acceptance_rule: Currently only "hard" is implemented. + alpha_start: Starting multiplier for binary search. + growth_factor: Factor for bracket expansion. + alpha_min: Minimum allowed alpha value. + alpha_max: Maximum allowed alpha value. + resolution: Convergence threshold for binary search. + max_bracket_iters: Maximum iterations for bracketing phase. + max_bisect_iters: Maximum iterations for bisection phase. + seeds_per_alpha: Number of placement attempts per alpha probe. + placement_rounds: Placement optimization rounds. **Attributes:** - `name` (str) - `seed` (Optional[int]) - `_seed_source` (str) -- `matrix_name` (str) = default +- `demand_set` (str) = default - `acceptance_rule` (str) = hard - `alpha_start` (float) = 1.0 - `growth_factor` (float) = 2.0 @@ -1426,7 +1415,7 @@ optional exclusion simulation and disabled entity handling. YAML Configuration Example: ```yaml workflow: - - step_type: NetworkStats + - type: NetworkStats name: "network_statistics" # Optional: Custom name for this step include_disabled: false # Include disabled nodes/links in stats @@ -1483,7 +1472,7 @@ instances using the WORKFLOW_STEP_REGISTRY and attaches unique names/seeds. Instantiate workflow steps from normalized dictionaries. Args: - workflow_data: List of step dicts; each must have "step_type". + workflow_data: List of step dicts; each must have "type". derive_seed: Callable that takes a step name and returns a seed or None. Returns: @@ -1495,15 +1484,29 @@ Returns: TrafficMatrixPlacement workflow step. -Runs Monte Carlo demand placement using a named traffic matrix and produces +Runs Monte Carlo demand placement using a named demand set and produces unified `flow_results` per iteration under `data.flow_results`. Baseline (no failures) is always run first as a separate reference. The `iterations` parameter specifies how many failure scenarios to run. +YAML Configuration Example: + ```yaml + workflow: + - type: TrafficMatrixPlacement + + name: "tm_analysis" + demand_set: "default" + failure_policy: "single_link" # Optional: failure policy name + iterations: 100 # Number of failure scenarios + parallelism: 4 # Worker processes (or "auto") + alpha: 1.0 # Demand volume multiplier + include_flow_details: true # Include cost distribution per flow + ``` + ### TrafficMatrixPlacement -Monte Carlo demand placement using a named traffic matrix. +Monte Carlo demand placement using a named demand set. 
Baseline (no failures) is always run first as a separate reference. Results are returned with baseline in a separate field. The flow_results list contains unique @@ -1511,8 +1514,8 @@ failure patterns (deduplicated); each result has occurrence_count indicating how many iterations matched that pattern. Attributes: - matrix_name: Name of the traffic matrix to analyze. - failure_policy: Optional policy name in scenario.failure_policy_set. + demand_set: Name of the demand set to analyze. + failure_policy: Optional failure policy name in scenario.failure_policy_set. iterations: Number of failure iterations to run. parallelism: Number of parallel worker processes. placement_rounds: Placement optimization rounds (int or "auto"). @@ -1520,7 +1523,7 @@ Attributes: store_failure_patterns: Whether to store failure pattern results. include_flow_details: When True, include cost_distribution per flow. include_used_edges: When True, include set of used edges per demand in entry data. - alpha: Numeric scale for demands in the matrix. + alpha: Numeric scale for demands in the set. alpha_from_step: Optional producer step name to read alpha from. alpha_from_field: Dotted field path in producer step (default: "data.alpha_star"). @@ -1529,7 +1532,7 @@ Attributes: - `name` (str) - `seed` (int | None) - `_seed_source` (str) -- `matrix_name` (str) +- `demand_set` (str) - `failure_policy` (str | None) - `iterations` (int) = 1 - `parallelism` (int | str) = auto @@ -1556,24 +1559,19 @@ Network topology blueprints and generation. Represents a reusable blueprint for hierarchical sub-topologies. -A blueprint may contain multiple groups of nodes (each can have a node_count -and a name_template), plus adjacency rules describing how those groups connect. +A blueprint may contain multiple node definitions (each can have count +and template), plus link definitions describing how those nodes connect. Attributes: - name (str): Unique identifier of this blueprint. - groups (Dict[str, Any]): A mapping of group_name -> group definition. - Allowed top-level keys in each group definition here are the same - as in normal group definitions (e.g. node_count, name_template, - attrs, disabled, risk_groups, or nested use_blueprint references, etc.). - adjacency (List[Dict[str, Any]]): A list of adjacency definitions - describing how these groups are linked, using the DSL fields - (source, target, pattern, link_params, etc.). + name: Unique identifier of this blueprint. + nodes: A mapping of node_name -> node definition. + links: A list of link definitions. **Attributes:** - `name` (str) -- `groups` (Dict[str, Any]) -- `adjacency` (List[Dict[str, Any]]) +- `nodes` (Dict[str, Any]) +- `links` (List[Dict[str, Any]]) ### DSLExpansionContext @@ -1581,16 +1579,15 @@ Carries the blueprint definitions and the final Network instance to be populated during DSL expansion. Attributes: - blueprints (Dict[str, Blueprint]): Dictionary of blueprint-name -> Blueprint. - network (Network): The Network into which expanded nodes/links are inserted. - pending_bp_adj (List[tuple[Dict[str, Any], str]]): Deferred blueprint adjacency - expansions collected as (adj_def, parent_path) to be processed later. + blueprints: Dictionary of blueprint-name -> Blueprint. + network: The Network into which expanded nodes/links are inserted. + pending_bp_links: Deferred blueprint link expansions. 
**Attributes:** - `blueprints` (Dict[str, Blueprint]) - `network` (Network) -- `pending_bp_adj` (List[tuple[Dict[str, Any], str]]) = [] +- `pending_bp_links` (List[tuple[Dict[str, Any], str]]) = [] ### expand_network_dsl(data: 'Dict[str, Any]') -> 'Network' @@ -1599,43 +1596,33 @@ Expands a combined blueprint + network DSL into a complete Network object. Overall flow: 1) Parse "blueprints" into Blueprint objects. 2) Build a Network from "network" metadata (e.g. name, version). - 3) Expand 'network["groups"]' (collect blueprint adjacencies for later). + 3) Expand 'network["nodes"]' (collect blueprint links for later). -- If a group references a blueprint, incorporate that blueprint's subgroups +- If a node group references a blueprint, incorporate that blueprint's - while merging parent's attrs + disabled + risk_groups into subgroups. - Blueprint adjacency is deferred and processed after node overrides. + nodes while merging parent's attrs + disabled + risk_groups. + Blueprint links are deferred and processed after node rules. - Otherwise, directly create nodes (a "direct node group"). - 4) Process any direct node definitions (network["nodes"]). - 5) Process node overrides (in order if multiple overrides match). - 6) Expand deferred blueprint adjacencies. - 7) Expand adjacency definitions in 'network["adjacency"]'. - 8) Process any direct link definitions (network["links"]). - 9) Process link overrides (in order if multiple overrides match). + 4) Process node rules (in order if multiple rules match). + 5) Expand deferred blueprint links. + 6) Expand link definitions in 'network["links"]'. + 7) Process link rules (in order if multiple rules match). Field validation rules: -- Only certain top-level fields are permitted in each structure. Any extra - - keys raise a ValueError. "attrs" is where arbitrary user fields go. - -- For link_params, recognized fields are "capacity", "cost", "disabled", - - "risk_groups", "attrs". Everything else must go inside link_params["attrs"]. - -- For node/group definitions, recognized fields include "node_count", +- Only certain top-level fields are permitted in each structure. +- Link properties are flat (capacity, cost, etc. at link level). +- For node definitions: count, template, attrs, disabled, risk_groups, - "name_template", "attrs", "disabled", "risk_groups" or "use_blueprint" - for blueprint-based groups. + or blueprint for blueprint-based nodes. Args: - data (Dict[str, Any]): The YAML-parsed dictionary containing - optional "blueprints" + "network". + data: The YAML-parsed dictionary containing optional "blueprints" + "network". Returns: - Network: The expanded Network object with all nodes and links. + The expanded Network object with all nodes and links. --- @@ -1646,16 +1633,9 @@ Parsing helpers for the network DSL. This module factors out pure parsing/validation helpers from the expansion module so they can be tested independently and reused. -### check_adjacency_keys(adj_def: 'Dict[str, Any]', context: 'str') -> 'None' +### check_link_keys(link_def: 'Dict[str, Any]', context: 'str') -> 'None' -Ensure adjacency definitions only contain recognized keys. - -### check_link_params(link_params: 'Dict[str, Any]', context: 'str') -> 'None' - -Ensure link_params contain only recognized keys. - -Link attributes may include "hardware" per-end mapping when set under -link_params.attrs. This function only validates top-level link_params keys. +Ensure link definitions only contain recognized keys. 
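+For illustration, a minimal scenario exercising this node-group expansion (a sketch: `Scenario.from_yaml` is the entry point used in the examples rather than calling `expand_network_dsl` directly, the `{n}` template is assumed to number nodes from 1, and bare group names are assumed to work as selector paths at the network root):
+
+```python
+from ngraph.scenario import Scenario
+
+# Two node groups expanded from count/template, plus one mesh link rule
+# with flat link properties (capacity, cost at the link level).
+scenario_yaml = """
+network:
+  nodes:
+    leaf: {count: 2, template: "leaf{n}"}
+    spine: {count: 2, template: "spine{n}"}
+  links:
+    - {source: leaf, target: spine, pattern: mesh, capacity: 10, cost: 1}
+"""
+network = Scenario.from_yaml(scenario_yaml).network
+```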
### check_no_extra_keys(data_dict: 'Dict[str, Any]', allowed: 'set[str]', context: 'str') -> 'None' @@ -1760,19 +1740,20 @@ Provides dataclasses for template expansion configuration. Specification for variable-based expansion. Attributes: - expand_vars: Mapping of variable names to lists of values. - expansion_mode: How to combine variable values. + vars: Mapping of variable names to lists of values. + mode: How to combine variable values. - "cartesian": All combinations (default) - "zip": Pair values by position **Attributes:** -- `expand_vars` (Dict[str, List[Any]]) = {} -- `expansion_mode` (Literal['cartesian', 'zip']) = cartesian +- `vars` (Dict[str, List[Any]]) = {} +- `mode` (Literal['cartesian', 'zip']) = cartesian **Methods:** +- `from_dict(data: 'Dict[str, Any]') -> "Optional['ExpansionSpec']"` - Extract expand: block from dict. - `is_empty(self) -> 'bool'` - Check if no variables are defined. --- @@ -1781,8 +1762,23 @@ Attributes: Variable expansion for templates. -Provides expand_templates() function for substituting $var and ${var} -placeholders in template strings. +Provides substitution of $var and ${var} placeholders in strings, +with recursive substitution in nested structures. + +### expand_block(block: 'Dict[str, Any]', spec: "Optional['ExpansionSpec']") -> 'Iterator[Dict[str, Any]]' + +Expand a DSL block, yielding one dict per variable combination. + +If no expand spec is provided or it has no vars, yields the original block. +Otherwise, yields a deep copy with all strings substituted for each +variable combination. + +Args: + block: DSL block (dict) that may contain template strings. + spec: Optional expansion specification. + +Yields: + Dict with variable substitutions applied. ### expand_templates(templates: 'Dict[str, str]', spec: "'ExpansionSpec'") -> 'Iterator[Dict[str, str]]' @@ -1791,7 +1787,7 @@ Expand template strings with variable substitution. Uses $var or ${var} syntax only. Args: - templates: Dict of template strings, e.g. {"source": "dc${dc}/...", "sink": "..."}. + templates: Dict of template strings. spec: Expansion specification with variables and mode. Yields: @@ -1801,26 +1797,16 @@ Raises: ValueError: If zip mode has mismatched list lengths or expansion exceeds limit. KeyError: If a template references an undefined variable. -Example: - >>> spec = ExpansionSpec(expand_vars={"dc": [1, 2]}) - >>> list(expand_templates({"src": "dc${dc}"}, spec)) - [{"src": "dc1"}, {"src": "dc2"}] +### substitute_vars(obj: 'Any', var_dict: 'Dict[str, Any]') -> 'Any' -### substitute_vars(template: 'str', var_dict: 'Dict[str, Any]') -> 'str' - -Substitute $var and ${var} placeholders in a template string. - -Uses $ prefix to avoid collision with regex {m,n} quantifiers. +Recursively substitute ${var} in all strings within obj. Args: - template: String containing $var or ${var} placeholders. + obj: Any value (string, dict, list, or primitive). var_dict: Mapping of variable names to values. Returns: - Template with variables substituted. - -Raises: - KeyError: If a referenced variable is not in var_dict. + Object with all string values having variables substituted. --- @@ -1848,7 +1834,7 @@ Condition evaluation for node/entity filtering. Provides evaluation logic for attribute conditions used in selectors and failure policies. Supports operators: ==, !=, <, <=, >, >=, -contains, not_contains, in, not_in, any_value, no_value. +contains, not_contains, in, not_in, exists, not_exists. Supports dot-notation for nested attribute access (e.g., "hardware.vendor"). 
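+An illustrative sketch of the condition semantics described above - not the library's evaluator, whose function names are not shown in this excerpt:
+
+```python
+def get_nested(attrs: dict, dotted: str):
+    """Resolve dot-notation like "hardware.vendor" against nested dicts."""
+    cur = attrs
+    for part in dotted.split("."):
+        if not isinstance(cur, dict) or part not in cur:
+            return None
+        cur = cur[part]
+    return cur
+
+# A subset of the documented operators, expressed as plain predicates.
+OPS = {
+    "==": lambda a, b: a == b,
+    "!=": lambda a, b: a != b,
+    "contains": lambda a, b: b in a,
+    "exists": lambda a, b: a is not None,  # right-hand value unused
+}
+
+node_attrs = {"hardware": {"vendor": "acme"}, "region": "southwest"}
+cond = {"attr": "hardware.vendor", "op": "==", "value": "acme"}
+
+value = get_nested(node_attrs, cond["attr"])
+print(OPS[cond["op"]](value, cond["value"]))  # True
+```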
@@ -1958,7 +1944,7 @@ Raises: Schema definitions for unified node selection. Provides dataclasses for node selection configuration used across -adjacency, demands, overrides, and workflow steps. +network rules, demands, and workflow steps. ### Condition @@ -1969,13 +1955,13 @@ resolves to attrs["hardware"]["vendor"]). Attributes: attr: Attribute name to match (supports dot-notation for nested attrs). - operator: Comparison operator. - value: Right-hand operand (unused for any_value/no_value). + op: Comparison operator. + value: Right-hand operand (unused for exists/not_exists). **Attributes:** - `attr` (str) -- `operator` (Literal['==', '!=', '<', '<=', '>', '>=', 'contains', 'not_contains', 'in', 'not_in', 'any_value', 'no_value']) +- `op` (Literal['==', '!=', '<', '<=', '>', '>=', 'contains', 'not_contains', 'in', 'not_in', 'exists', 'not_exists']) - `value` (Any) ### MatchSpec @@ -2297,12 +2283,24 @@ Args: Scenario snapshot helpers. -Build a concise dictionary snapshot of failure policies and traffic matrices for +Build a concise dictionary snapshot of failure policies and demand sets for export into results without keeping heavy domain objects. -### build_scenario_snapshot(*, seed: 'int | None', failure_policy_set, traffic_matrix_set) -> 'Dict[str, Any]' +### build_scenario_snapshot(*, seed: 'int | None', failure_policy_set, demand_set) -> 'Dict[str, Any]' + +Build a concise dictionary snapshot of the scenario state. -No documentation available. +Creates a serializable representation of the scenario's failure policies +and demand sets, suitable for export into results without keeping heavy +domain objects. + +Args: + seed: Scenario-level seed for reproducibility, or None if unseeded. + failure_policy_set: FailurePolicySet containing named failure policies. + demand_set: DemandSet containing named demand collections. + +Returns: + Dict containing: seed, failures (policy snapshots), demands (demand snapshots). --- @@ -2495,7 +2493,7 @@ Determines how multiple source and sink nodes are combined for analysis. Types and data structures for algorithm analytics. -Defines immutable summary containers and aliases for algorithm outputs. +Defines immutable summary containers for algorithm outputs. ### EdgeRef @@ -2891,15 +2889,17 @@ Expand TrafficDemand specifications into concrete demands with augmentations. Pure function that: -1. Expands variables in selectors using expand_vars -2. Normalizes and evaluates selectors to get node groups -3. Distributes volume based on mode (combine/pairwise) and group_mode -4. Generates augmentation edges for combine mode (pseudo nodes) -5. Returns demands (node names) + augmentations +1. Normalizes and evaluates selectors to get node groups +2. Distributes volume based on mode (combine/pairwise) and group_mode +3. Generates augmentation edges for combine mode (pseudo nodes) +4. Returns demands (node names) + augmentations Node names are used (not IDs) so expansion happens BEFORE graph building. IDs are resolved after graph is built with augmentations. +Note: Variable expansion (expand: block) is handled during YAML parsing in +build_demand_set(), so TrafficDemand objects here are already expanded. + Args: network: Network for node selection. traffic_demands: High-level demand specifications. @@ -2940,8 +2940,8 @@ and parallelism level. Protocol for analysis functions used with FailureManager. -Analysis functions should take a Network, exclusion sets, and any additional -keyword arguments, returning analysis results of any type. 
+Analysis functions take a Network, exclusion sets, and analysis-specific +parameters, returning results of any type. ### FailureManager @@ -2964,10 +2964,10 @@ Attributes: - `compute_exclusions(self, policy: "'FailurePolicy | None'" = None, seed_offset: 'int | None' = None, failure_trace: 'Optional[Dict[str, Any]]' = None) -> 'tuple[set[str], set[str]]'` - Compute set of nodes and links to exclude for a failure iteration. - `get_failure_policy(self) -> "'FailurePolicy | None'"` - Get failure policy for analysis. -- `run_demand_placement_monte_carlo(self, demands_config: 'list[dict[str, Any]] | Any', iterations: 'int' = 100, parallelism: 'int' = 1, placement_rounds: 'int | str' = 'auto', seed: 'int | None' = None, store_failure_patterns: 'bool' = False, include_flow_details: 'bool' = False, include_used_edges: 'bool' = False, **kwargs) -> 'Any'` - Analyze traffic demand placement success under failures. -- `run_max_flow_monte_carlo(self, source: 'str | dict[str, Any]', sink: 'str | dict[str, Any]', mode: 'str' = 'combine', iterations: 'int' = 100, parallelism: 'int' = 1, shortest_path: 'bool' = False, require_capacity: 'bool' = True, flow_placement: 'FlowPlacement | str' = , seed: 'int | None' = None, store_failure_patterns: 'bool' = False, include_flow_summary: 'bool' = False, **kwargs) -> 'Any'` - Analyze maximum flow capacity envelopes between node groups under failures. +- `run_demand_placement_monte_carlo(self, demands_config: 'list[dict[str, Any]] | Any', iterations: 'int' = 100, parallelism: 'int' = 1, placement_rounds: 'int | str' = 'auto', seed: 'int | None' = None, store_failure_patterns: 'bool' = False, include_flow_details: 'bool' = False, include_used_edges: 'bool' = False) -> 'Any'` - Analyze traffic demand placement success under failures. +- `run_max_flow_monte_carlo(self, source: 'str | dict[str, Any]', target: 'str | dict[str, Any]', mode: 'str' = 'combine', iterations: 'int' = 100, parallelism: 'int' = 1, shortest_path: 'bool' = False, require_capacity: 'bool' = True, flow_placement: 'FlowPlacement | str' = , seed: 'int | None' = None, store_failure_patterns: 'bool' = False, include_flow_summary: 'bool' = False, include_min_cut: 'bool' = False) -> 'Any'` - Analyze maximum flow capacity envelopes between node groups under failures. - `run_monte_carlo_analysis(self, analysis_func: 'AnalysisFunction', iterations: 'int' = 1, parallelism: 'int' = 1, seed: 'int | None' = None, store_failure_patterns: 'bool' = False, **analysis_kwargs) -> 'dict[str, Any]'` - Run Monte Carlo failure analysis with any analysis function. -- `run_sensitivity_monte_carlo(self, source: 'str | dict[str, Any]', sink: 'str | dict[str, Any]', mode: 'str' = 'combine', iterations: 'int' = 100, parallelism: 'int' = 1, shortest_path: 'bool' = False, flow_placement: 'FlowPlacement | str' = , seed: 'int | None' = None, store_failure_patterns: 'bool' = False, **kwargs) -> 'dict[str, Any]'` - Analyze component criticality for flow capacity under failures. +- `run_sensitivity_monte_carlo(self, source: 'str | dict[str, Any]', target: 'str | dict[str, Any]', mode: 'str' = 'combine', iterations: 'int' = 100, parallelism: 'int' = 1, shortest_path: 'bool' = False, flow_placement: 'FlowPlacement | str' = , seed: 'int | None' = None, store_failure_patterns: 'bool' = False) -> 'dict[str, Any]'` - Analyze component criticality for flow capacity under failures. - `run_single_failure_scenario(self, analysis_func: 'AnalysisFunction', **kwargs) -> 'Any'` - Run a single failure scenario for convenience. 
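A minimal sketch of plugging a custom function into `run_monte_carlo_analysis`, assuming `fm` is an existing FailureManager (constructed as in the `docs/reference/api.md` example later in this diff):

```python
# Hypothetical analysis function following the protocol described above:
# it receives the network plus the per-iteration exclusion sets.
def exclusion_counts(network, excluded_nodes, excluded_links, **params):
    return {"nodes_failed": len(excluded_nodes), "links_failed": len(excluded_links)}

results = fm.run_monte_carlo_analysis(  # fm: assumed FailureManager instance
    analysis_func=exclusion_counts,
    iterations=50,
    parallelism=1,
    seed=7,  # reproducible failure draws
)
```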
--- @@ -2976,12 +2976,12 @@ Attributes: Flow analysis functions for network evaluation. -These functions are designed for use with FailureManager and follow the -AnalysisFunction protocol: analysis_func(network: Network, excluded_nodes: Set[str], -excluded_links: Set[str], **kwargs) -> Any. +These functions are designed for use with FailureManager. Each analysis function +takes a Network, exclusion sets, and analysis-specific parameters, returning +results of type FlowIterationResult. -All functions accept only simple, hashable parameters to ensure compatibility -with FailureManager's caching and multiprocessing systems. +Parameters should ideally be hashable for efficient caching in FailureManager; +non-hashable objects are identified by memory address for cache key generation. Graph caching enables efficient repeated analysis with different exclusion sets by building the graph once and using O(|excluded|) masks for exclusions. @@ -2994,7 +2994,7 @@ sharing the same sources, this can reduce SPF computations by an order of magnit Build an AnalysisContext for repeated demand placement analysis. -Pre-computes the graph with augmentations (pseudo source/sink nodes) for +Pre-computes the graph with augmentations (pseudo source/target nodes) for efficient repeated analysis with different exclusion sets. Args: @@ -3004,23 +3004,23 @@ Args: Returns: AnalysisContext ready for use with demand_placement_analysis. -### build_maxflow_context(network: "'Network'", source: 'str | dict[str, Any]', sink: 'str | dict[str, Any]', mode: 'str' = 'combine') -> 'AnalysisContext' +### build_maxflow_context(network: "'Network'", source: 'str | dict[str, Any]', target: 'str | dict[str, Any]', mode: 'str' = 'combine') -> 'AnalysisContext' Build an AnalysisContext for repeated max-flow analysis. -Pre-computes the graph with pseudo source/sink nodes for all source/sink +Pre-computes the graph with pseudo source/target nodes for all source/target pairs, enabling O(|excluded|) mask building per iteration. Args: network: Network instance. source: Source node selector (string path or selector dict). - sink: Sink node selector (string path or selector dict). + target: Target node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). Returns: AnalysisContext ready for use with max_flow_analysis or sensitivity_analysis. -### demand_placement_analysis(network: "'Network'", excluded_nodes: 'Set[str]', excluded_links: 'Set[str]', demands_config: 'list[dict[str, Any]]', placement_rounds: 'int | str' = 'auto', include_flow_details: 'bool' = False, include_used_edges: 'bool' = False, context: 'Optional[AnalysisContext]' = None, **kwargs) -> 'FlowIterationResult' +### demand_placement_analysis(network: "'Network'", excluded_nodes: 'Set[str]', excluded_links: 'Set[str]', demands_config: 'list[dict[str, Any]]', placement_rounds: 'int | str' = 'auto', include_flow_details: 'bool' = False, include_used_edges: 'bool' = False, context: 'Optional[AnalysisContext]' = None) -> 'FlowIterationResult' Analyze traffic demand placement success rates using Core directly. @@ -3029,7 +3029,7 @@ This function: 1. Builds Core infrastructure (graph, algorithms, flow_graph) or uses cached 2. Expands demands into concrete (src, dst, volume) tuples 3. Places each demand using SPF caching for cacheable policies -4. Falls back to FlowPolicy for complex multi-flow policies +4. Uses FlowPolicy for complex multi-flow policies 5. 
Aggregates results into FlowIterationResult SPF Caching Optimization: @@ -3047,12 +3047,11 @@ Args: include_flow_details: When True, include cost_distribution per flow. include_used_edges: When True, include set of used edges per demand in entry data. context: Pre-built AnalysisContext for fast repeated analysis. - **kwargs: Ignored. Accepted for interface compatibility. Returns: FlowIterationResult describing this iteration. -### max_flow_analysis(network: "'Network'", excluded_nodes: 'Set[str]', excluded_links: 'Set[str]', source: 'str | dict[str, Any]', sink: 'str | dict[str, Any]', mode: 'str' = 'combine', shortest_path: 'bool' = False, require_capacity: 'bool' = True, flow_placement: 'FlowPlacement' = , include_flow_details: 'bool' = False, include_min_cut: 'bool' = False, context: 'Optional[AnalysisContext]' = None, **kwargs) -> 'FlowIterationResult' +### max_flow_analysis(network: "'Network'", excluded_nodes: 'Set[str]', excluded_links: 'Set[str]', source: 'str | dict[str, Any]', target: 'str | dict[str, Any]', mode: 'str' = 'combine', shortest_path: 'bool' = False, require_capacity: 'bool' = True, flow_placement: 'FlowPlacement' = , include_flow_details: 'bool' = False, include_min_cut: 'bool' = False, context: 'Optional[AnalysisContext]' = None) -> 'FlowIterationResult' Analyze maximum flow capacity between node groups. @@ -3061,7 +3060,7 @@ Args: excluded_nodes: Set of node names to exclude temporarily. excluded_links: Set of link IDs to exclude temporarily. source: Source node selector (string path or selector dict). - sink: Sink node selector (string path or selector dict). + target: Target node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). shortest_path: Whether to use shortest paths only. require_capacity: If True (default), path selection considers available @@ -3070,18 +3069,17 @@ Args: include_flow_details: Whether to collect cost distribution and similar details. include_min_cut: Whether to include min-cut edge list in entry data. context: Pre-built AnalysisContext for efficient repeated analysis. - **kwargs: Ignored. Accepted for interface compatibility. Returns: FlowIterationResult describing this iteration. -### sensitivity_analysis(network: "'Network'", excluded_nodes: 'Set[str]', excluded_links: 'Set[str]', source: 'str | dict[str, Any]', sink: 'str | dict[str, Any]', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = , context: 'Optional[AnalysisContext]' = None, **kwargs) -> 'FlowIterationResult' +### sensitivity_analysis(network: "'Network'", excluded_nodes: 'Set[str]', excluded_links: 'Set[str]', source: 'str | dict[str, Any]', target: 'str | dict[str, Any]', mode: 'str' = 'combine', shortest_path: 'bool' = False, flow_placement: 'FlowPlacement' = , context: 'Optional[AnalysisContext]' = None) -> 'FlowIterationResult' Analyze component sensitivity to failures. Identifies critical edges (saturated edges) and computes the flow reduction caused by removing each one. Returns a FlowIterationResult where each -FlowEntry represents a source/sink pair with: +FlowEntry represents a source/target pair with: - demand/placed = max flow value (the capacity being analyzed) - dropped = 0.0 (baseline analysis, no failures applied) @@ -3092,14 +3090,13 @@ Args: excluded_nodes: Set of node names to exclude temporarily. excluded_links: Set of link IDs to exclude temporarily. source: Source node selector (string path or selector dict). - sink: Sink node selector (string path or selector dict). 
+ target: Target node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). shortest_path: If True, use single-tier shortest-path flow (IP/IGP mode). Reports only edges used under ECMP routing. If False (default), use full iterative max-flow (SDN/TE mode) and report all saturated edges. flow_placement: Flow placement strategy. context: Pre-built AnalysisContext for efficient repeated analysis. - **kwargs: Ignored. Accepted for interface compatibility. Returns: FlowIterationResult with sensitivity data in each FlowEntry.data. diff --git a/docs/reference/api.md b/docs/reference/api.md index 160c082..2741735 100644 --- a/docs/reference/api.md +++ b/docs/reference/api.md @@ -354,7 +354,7 @@ network.add_link(Link("A", "B", capacity=100.0)) network.add_link(Link("B", "C", capacity=100.0)) # Define failure policy: randomly choose 1 link to fail -rule = FailureRule(entity_scope="link", rule_type="choice", count=1) # scope can be "node", "link", or "risk_group" +rule = FailureRule(scope="link", mode="choice", count=1) # scope can be "node", "link", or "risk_group" mode = FailureMode(weight=1.0, rules=[rule]) policy = FailurePolicy(modes=[mode]) policy_set = FailurePolicySet(policies={"single_link": policy}) @@ -368,13 +368,12 @@ fm = FailureManager( # Run max-flow Monte Carlo analysis results = fm.run_max_flow_monte_carlo( - source_path="^A$", - sink_path="^C$", + source="^A$", + target="^C$", mode="combine", iterations=100, parallelism=1, - baseline=True, # Include no-failure baseline - seed=42 # For reproducibility + seed=42 # For reproducibility ) # Access results @@ -396,10 +395,10 @@ Pre-built analysis steps for YAML-driven workflows. ```yaml workflow: - - step_type: MaxFlow + - type: MaxFlow name: "dc_to_edge_capacity" - source_path: "^datacenter/.*" - sink_path: "^edge/.*" + source: "^datacenter/.*" + target: "^edge/.*" mode: "combine" failure_policy: "random_link_failures" iterations: 100 @@ -413,9 +412,9 @@ workflow: ```yaml workflow: - - step_type: TrafficMatrixPlacement + - type: TrafficMatrixPlacement name: "tm_placement_analysis" - matrix_name: "peak_traffic" + demand_set: "peak_traffic" failure_policy: "dual_link_failures" iterations: 100 parallelism: auto @@ -426,9 +425,9 @@ workflow: ```yaml workflow: - - step_type: MaximumSupportedDemand + - type: MaximumSupportedDemand name: "find_alpha_star" - matrix_name: "peak_traffic" + demand_set: "peak_traffic" alpha_start: 1.0 growth_factor: 2.0 resolution: 0.01 @@ -439,7 +438,7 @@ workflow: ```yaml workflow: - - step_type: NetworkStats + - type: NetworkStats name: "baseline_stats" include_disabled: false excluded_nodes: ["n1"] @@ -449,7 +448,7 @@ workflow: ```yaml workflow: - - step_type: CostPower + - type: CostPower name: "cost_power_analysis" include_disabled: false aggregation_level: 2 diff --git a/docs/reference/cli.md b/docs/reference/cli.md index 9a11996..1568875 100644 --- a/docs/reference/cli.md +++ b/docs/reference/cli.md @@ -167,13 +167,13 @@ The `--keys` option filters by the `name` field of workflow steps defined in you ```yaml workflow: - - step_type: NetworkStats + - type: NetworkStats name: network_statistics - - step_type: MaximumSupportedDemand + - type: MaximumSupportedDemand name: msd_baseline ``` -Then `--keys build_graph` will include only the results from the BuildGraph step, and `--keys capacity_analysis` will include only the MaxFlow results. 
+Then `--keys network_statistics` will include only the results from the NetworkStats step, and `--keys msd_baseline` will include only the MaximumSupportedDemand results. ### Performance Profiling @@ -210,13 +210,13 @@ The CLI outputs results as JSON with a fixed top-level shape: ```json { - "workflow": { "": { "step_type": "...", "execution_order": 0, "step_name": "..." } }, + "workflow": { "": { "type": "...", "execution_order": 0, "step_name": "..." } }, "steps": { "network_statistics": { "metadata": {}, "data": { "node_count": 42, "link_count": 84 } }, - "msd_baseline": { "metadata": {}, "data": { "alpha_star": 1.23, "context": { "matrix_name": "baseline_traffic_matrix" } } }, - "tm_placement": { "metadata": { "iterations": 1000 }, "data": { "flow_results": [ { "flows": [], "summary": {} } ], "context": { "matrix_name": "baseline_traffic_matrix" } } } + "msd_baseline": { "metadata": {}, "data": { "alpha_star": 1.23, "context": { "demand_set": "baseline_traffic_matrix" } } }, + "tm_placement": { "metadata": { "iterations": 1000 }, "data": { "flow_results": [ { "flows": [], "summary": {} } ], "context": { "demand_set": "baseline_traffic_matrix" } } } }, - "scenario": { "seed": 42, "failure_policy_set": { }, "traffic_matrices": { } } + "scenario": { "seed": 42, "failures": { }, "demands": { } } } ``` diff --git a/docs/reference/design.md b/docs/reference/design.md index 34aad3d..a46e607 100644 --- a/docs/reference/design.md +++ b/docs/reference/design.md @@ -88,19 +88,19 @@ Key elements of the DSL include: - **Blueprints**: Reusable templates for subsets of the topology. A blueprint defines internal node types, roles, and optional internal links. Blueprints enable defining a complex multi-node topology once and instantiating it multiple times with different parameters. -- **Groups**: Definitions of groups of nodes in the topology, either explicitly or via patterns. Groups can use a blueprint (use_blueprint) with parameters, or define a number of nodes (node_count) with a naming template. +- **Node Groups**: Definitions of node groups in the topology, either explicitly or via patterns. Groups can use a blueprint (`blueprint`) with parameters (`params`), or define a number of nodes (`count`) with a naming template (`template`). -- **Adjacency**: Rules to generate links between groups of nodes. Instead of enumerating every link, an adjacency rule specifies source and target selectors (by path pattern), a wiring pattern (e.g. mesh for full mesh or one_to_one for paired links), number of parallel links (link_count), and link parameters (capacity, cost, attributes like distance, hardware, risk group tags, etc.). Advanced matching allows filtering nodes by attributes with logical conditions (AND/OR) to apply adjacency rules to selected nodes only. A single rule can thus expand into many concrete links. +- **Links**: Rules to generate links between node groups. Instead of enumerating every link, a link rule specifies source and target selectors (by path pattern), a wiring pattern (e.g. mesh for full mesh or one_to_one for paired links), number of parallel links (`count`), and link properties (capacity, cost, attributes like distance, hardware, risk group tags, etc.). Link properties are specified at the top level, not inside a wrapper. Advanced matching allows filtering nodes by attributes with logical conditions (AND/OR) to apply link rules to selected nodes only. A single rule can thus expand into many concrete links. 
-- **Overrides**: Optional modifications applied after the initial expansion. node_overrides or link_overrides can match specific nodes or links (by path or endpoints) and change their attributes or disable them. This allows fine-tuning or simulating removals without changing the base definitions. +- **Rules**: Optional modifications applied after the initial expansion. `node_rules` or `link_rules` can match specific nodes or links (by path or endpoints) and change their attributes or disable them. This allows fine-tuning or simulating removals without changing the base definitions. - **Risk Groups**: Named shared-risk groups (potentially nested) that nodes or links can belong to. These are used in failure scenarios to correlate failures (e.g. all links in a risk group fail together). -- **Traffic Matrices**: Demand definitions specifying source node sets, sink node sets (by regex or attribute path selectors), and demand volume. Each demand can also include priority or custom flow placement policy. +- **Demands**: Traffic demand definitions specifying source node sets, target node sets (by regex or attribute path selectors), and volume. Each demand can also include priority or custom flow placement policy. - **Failure Policies**: Definitions of failure scenarios or modes, possibly with weights (probabilities). For example, a policy might say "with 5% chance, fail any single core node" or "fail all links in risk_group X". The failure manager uses these policies to generate specific failure combinations for simulation. -- **Workflow**: An ordered list of analysis steps to execute. Each step has a step_type (the analysis to perform, such as "MaxFlow" or "TrafficMatrixPlacement"), a unique name, and parameters (like number of iterations, etc.). The workflow definition orchestrates the analysis pipeline. +- **Workflow**: An ordered list of analysis steps to execute. Each step has a `type` (the analysis to perform, such as "MaxFlow" or "TrafficMatrixPlacement"), a unique name, and parameters (like number of iterations, etc.). The workflow definition orchestrates the analysis pipeline. ### DSL Expansion Process @@ -204,7 +204,7 @@ source: match: conditions: - attr: "tier" - operator: "==" + op: "==" value: "leaf" ``` @@ -512,7 +512,7 @@ After the loop, the C++ algorithm computes a FlowSummary which includes: - min_cut: the list of edges that are saturated and go from reachable to non-reachable (these form the minimum cut) -- cost_distribution: flow volume placed at each path cost tier. Core returns parallel arrays (`costs`, `flows`); Python wrapper converts to `Dict[Cost, Flow]` mapping in `FlowSummary.cost_distribution`. +- cost_distribution: flow volume placed at each path cost tier. Core returns parallel arrays (`costs`, `flows`); AnalysisContext converts these to `Dict[Cost, Flow]` mapping in `FlowSummary.cost_distribution`. This is returned along with the total flow value. 
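The parallel-array conversion is small enough to show inline. A sketch mirroring the `zip(costs, flows, strict=True)` change in `ngraph/analysis/context.py` later in this diff (values are made up):

```python
# Core returns parallel arrays; the wrapper zips them into Dict[Cost, Flow].
costs = [1.0, 2.0, 3.0]    # path cost tiers reported by Core
flows = [40.0, 35.0, 5.0]  # flow volume placed at each tier
cost_distribution = {float(c): float(f) for c, f in zip(costs, flows, strict=True)}
# strict=True raises ValueError if the arrays ever disagree in length
assert cost_distribution == {1.0: 40.0, 2.0: 35.0, 3.0: 5.0}
```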
@@ -625,20 +625,20 @@ For traffic matrix placement, NetGraph provides `FlowPolicyPreset` values that b ```yaml # IP network with traditional ECMP (e.g., data center leaf-spine) -traffic_matrix_set: +demands: dc_traffic: - source: ^rack1/ - sink: ^rack2/ - demand: 1000.0 - flow_policy_config: SHORTEST_PATHS_ECMP + target: ^rack2/ + volume: 1000.0 + flow_policy: SHORTEST_PATHS_ECMP # MPLS-TE network with capacity-aware tunnel placement -traffic_matrix_set: +demands: backbone_traffic: - source: ^metro1/ - sink: ^metro2/ - demand: 5000.0 - flow_policy_config: TE_WCMP_UNLIM + target: ^metro2/ + volume: 5000.0 + flow_policy: TE_WCMP_UNLIM ``` ### Pseudocode (simplified max-flow loop) @@ -700,9 +700,9 @@ Practical performance is significantly better than worst-case bounds due to earl Managers handle scenario dynamics and prepare inputs for algorithmic steps. -**Demand Expansion** (`ngraph.model.demand.builder`): Builds traffic matrix sets from DSL definitions, expanding source/sink patterns into concrete node groups. +**Demand Expansion** (`ngraph.model.demand.builder`): Builds demand sets from DSL definitions, expanding source/target patterns into concrete node groups. -- Deterministic expansion: source/sink node lists sorted alphabetically; no randomization +- Deterministic expansion: source/target node lists sorted alphabetically; no randomization - Supports `combine` mode (aggregate via pseudo nodes) and `pairwise` mode (individual (src,dst) pairs with volume split) - Demands sorted by ascending priority before placement (lower value = higher priority) - Placement uses SPF caching for simple policies (ECMP, WCMP, TE_WCMP_UNLIM), FlowPolicy for complex multi-flow policies @@ -729,15 +729,15 @@ Common built-in steps: - NetworkStats: computes node/link counts, capacity statistics, cost statistics, and degree statistics. Supports optional `excluded_nodes`/`excluded_links` and `include_disabled`. -- TrafficMatrixPlacement: runs Monte Carlo placement using a named traffic matrix and the Failure Manager. Supports `baseline`, `iterations`, `parallelism`, `placement_rounds`, `store_failure_patterns`, `include_flow_details`, `include_used_edges`, and `alpha` or `alpha_from_step` (default `data.alpha_star`). Produces `data.flow_results` per iteration. +- TrafficMatrixPlacement: runs Monte Carlo placement using a named demand set and the Failure Manager. Supports `baseline`, `iterations`, `parallelism`, `placement_rounds`, `store_failure_patterns`, `include_flow_details`, `include_used_edges`, and `alpha` or `alpha_from_step` (default `data.alpha_star`). Produces `data.flow_results` per iteration. - MaxFlow: runs Monte Carlo maximum-flow analysis between node groups using the Failure Manager. Supports `mode` (combine/pairwise), `baseline`, `iterations`, `parallelism`, `shortest_path`, `flow_placement`, and optional `include_flow_details`/`include_min_cut`. Produces `data.flow_results` per iteration. -- MaximumSupportedDemand (MSD): uses bracketing and bisection on alpha to find the maximum multiplier such that alpha * demand is feasible. Stores `data.alpha_star`, `data.context`, `data.base_demands`, and `data.probes`. +- MaximumSupportedDemand (MSD): uses bracketing and bisection on alpha to find the maximum multiplier such that alpha * volume is feasible. Stores `data.alpha_star`, `data.context`, `data.base_demands`, and `data.probes`. - CostPower: aggregates platform and per-end optics capex/power by hierarchy level (0..N). Respects `include_disabled` and `aggregation_level`. 
Stores `data.levels` and `data.context`. -Each step is implemented in the code (in ngraph.workflow module) and has a corresponding step_type name. Steps are pure functions that don't modify the Network. They take inputs, often including references to prior steps' results (the workflow engine allows one step to use another step's output). For instance, a placement step might need the value of alpha* from an MSD step; the workflow definition can specify that link. +Each step is implemented in the code (in ngraph.workflow module) and has a corresponding `type` name. Steps are pure functions that don't modify the Network. They take inputs, often including references to prior steps' results (the workflow engine allows one step to use another step's output). For instance, a placement step might need the value of alpha* from an MSD step; the workflow definition can specify that link. ### Results storage diff --git a/docs/reference/dsl.md b/docs/reference/dsl.md index 875f8e6..7024f47 100644 --- a/docs/reference/dsl.md +++ b/docs/reference/dsl.md @@ -26,9 +26,9 @@ The DSL uses three distinct template syntaxes in different contexts: | Syntax | Example | Context | Purpose | |--------|---------|---------|---------| -| `[1-3]` | `dc[1-3]/rack[a,b]` | Group names | Generate multiple groups | -| `$var` / `${var}` | `pod${p}/leaf` | Adjacency, demands | Template expansion with `expand_vars` | -| `{node_num}` | `srv-{node_num}` | `name_template` | Node naming (1-indexed) | +| `[1-3]` | `dc[1-3]/rack[a,b]` | Node group names | Generate multiple groups | +| `$var` / `${var}` | `pod${p}/leaf` | Links, demands | Template expansion with `expand` block | +| `{n}` | `srv-{n}` | `template` | Node naming (1-indexed) | **These syntaxes are not interchangeable.** Each works only in its designated context. @@ -37,8 +37,8 @@ The DSL uses three distinct template syntaxes in different contexts: | Syntax | Operation | Key Difference | |--------|-----------|----------------| | `[1-3]` | Static generation | Creates multiple definitions at parse time | -| `${var}` | Template substitution | Requires explicit `expand_vars` mapping | -| `{node_num}` | Sequential counter | Auto-increments based on `node_count` | +| `${var}` | Template substitution | Requires explicit `expand` block with `vars` | +| `{n}` | Sequential counter | Auto-increments based on `count` | Bracket expansion generates structure; variable expansion parameterizes rules; node naming indexes instances. @@ -50,7 +50,7 @@ The DSL implements two fundamentally different selection patterns optimized for The DSL uses distinct selection strategies depending on the operation: -**1. Path-Based Node Selection** (adjacency rules, traffic demands, workflow steps) +**1. Path-Based Node Selection** (link rules, traffic demands, workflow steps) - Uses regex patterns on hierarchical node names - Supports capture group-based grouping @@ -60,19 +60,19 @@ The DSL uses distinct selection strategies depending on the operation: **2. Condition-Based Entity Selection** (failure rules, membership rules, risk group generation) -- Works on nodes, links, or risk_groups (`entity_scope`) +- Works on nodes, links, or risk_groups (`scope`) - Uses only attribute-based filtering (`conditions`) - No path/regex patterns (operates on all entities of specified type) These patterns share common primitives (condition evaluation, match specification) but serve different purposes and should not be confused. 
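The distinction is easier to see in miniature; a purely illustrative Python contrast (not the actual selector implementation):

```python
import re

nodes = {
    "pod1/leaf/leaf-1": {"role": "leaf"},
    "pod1/spine/spine-1": {"role": "spine"},
}

# Path-based: regex over hierarchical names; capture groups drive grouping.
by_pod: dict[str, list[str]] = {}
for name in nodes:
    m = re.match(r"(pod\d+)/leaf/", name)
    if m:
        by_pod.setdefault(m.group(1), []).append(name)
# {'pod1': ['pod1/leaf/leaf-1']}

# Condition-based: attribute filtering over every entity of a scope.
spines = [n for n, attrs in nodes.items() if attrs.get("role") == "spine"]
```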
-### Adjacency Creation Flow
+### Link Creation Flow

-Adjacency rules create links between nodes using path-based selection with optional filtering:
+Link definitions create links between nodes using path-based selection with optional filtering:

 ```mermaid
 flowchart TD
-    Start[Adjacency Definition] --> VarExpand{Has expand_vars?}
+    Start[Link Definition] --> VarExpand{Has expand block?}
     VarExpand -->|Yes| VarSubst[Variable Substitution]
     VarSubst --> PathFilter
     VarExpand -->|No| PathFilter[1. Path-Based Selection]
@@ -98,7 +98,7 @@ flowchart TD
   - Uses `logic: "and"` or `"or"` (default: `"or"`)
   - Supports operators: `==`, `!=`, `<`, `>`, `contains`, `in`, etc.
 3. **Active Filtering**: Filters disabled nodes based on context
-   - Adjacency default: `active_only=false` (creates links to disabled nodes)
+   - Links default: `active_only=false` (creates links to disabled nodes)
 4. **Attribute Grouping**: Optional `group_by` overrides regex capture grouping
 5. **Pattern Application**: Creates links between selected node groups
   - `mesh`: Every source to every target
@@ -108,7 +108,7 @@ flowchart TD
 - `default_active_only=False` (links are created to disabled nodes)
 - `match.logic` defaults to `"or"` (inclusive matching)
-- Supports variable expansion via `expand_vars`
+- Supports variable expansion via `expand` block

 ### Traffic Demand Creation Flow

@@ -116,33 +116,33 @@ Traffic demands follow a similar pattern but with important differences:

 ```mermaid
 flowchart TD
-    Start[Traffic Demand Spec] --> VarExpand{Has expand_vars?}
+    Start[Traffic Demand Spec] --> VarExpand{Has expand block?}
     VarExpand -->|Yes| VarSubst[Variable Substitution<br>Creates multiple demand specs]
     VarSubst --> Process
     VarExpand -->|No| Process[Process Single Demand]

     Process --> SrcSelect[1. Select Source Nodes]
-    SrcSelect --> SinkSelect[2. Select Sink Nodes]
-    SinkSelect --> SrcDesc[Uses same path + match + group_by<br>selection as adjacency]
+    SrcSelect --> TgtSelect[2. Select Target Nodes]
+    TgtSelect --> SrcDesc[Uses same path + match + group_by<br>selection as links]
     SrcDesc --> Mode{Demand Mode?}
     Mode -->|pairwise| Pairwise[3a. Pairwise Expansion]
     Mode -->|combine| Combine[3b. Combine Expansion]

-    Pairwise --> PairDesc[Create demand for each src-dst pair<br>Volume distributed evenly<br>No pseudo nodes]
-    Combine --> CombDesc[Create pseudo-source and pseudo-sink<br>Single aggregated demand<br>Augmentation edges connect real nodes]
+    Pairwise --> PairDesc[Create demand for each src-tgt pair<br>Volume distributed evenly<br>No pseudo nodes]
+    Combine --> CombDesc[Create pseudo-source and pseudo-target<br>Single aggregated demand<br>Augmentation edges connect real nodes]
 ```

-**Key Differences from Adjacency:**
+**Key Differences from Links:**

 1. **Active-only default**: `default_active_only=True` (only active nodes participate)
-2. **Two selection phases**: Source nodes first, then sink nodes (both use same selector logic)
+2. **Two selection phases**: Source nodes first, then target nodes (both use same selector logic)
 3. **Expansion modes**:
-   - **Pairwise**: Creates individual demands for each (source, sink) pair
+   - **Pairwise**: Creates individual demands for each (source, target) pair
    - **Combine**: Creates pseudo nodes and a single aggregated demand
 4. **Group modes**: Additional layer (`flatten`, `per_group`, `group_pairwise`) for handling grouped selections

 **Processing Steps:**

 1. Select source nodes using unified selector (path + match + group_by)
-2. Select sink nodes using unified selector
+2. Select target nodes using unified selector
 3. Apply mode-specific expansion:
    - **Pairwise**: Volume evenly distributed across all pairs
    - **Combine**: Single demand with pseudo nodes for aggregation
@@ -160,11 +160,11 @@ flowchart TD

     Direct --> DirectDesc[Simply name the risk group<br>Entities reference it explicitly]

-    Member --> MemberScope[Specify entity_scope<br>node, link, or risk_group]
+    Member --> MemberScope[Specify scope<br>node, link, or risk_group]
     MemberScope --> MemberCond[Define match conditions<br>logic defaults to and]
     MemberCond --> MemberExec[Scan ALL entities of that scope<br>Add matching entities to risk group]

-    Generate --> GenScope[Specify entity_scope<br>node or link only]
+    Generate --> GenScope[Specify scope<br>node or link only]
     GenScope --> GenGroupBy[Specify group_by attribute]
     GenGroupBy --> GenExec[Collect unique values<br>Create risk group for each value<br>Add entities with that value]
 ```
@@ -184,8 +184,8 @@ flowchart TD

 ### Comparison Table

-| Feature | Adjacency | Traffic Demands | Risk Groups |
-|---------|-----------|----------------|-------------|
+| Feature | Links | Traffic Demands | Risk Groups |
+|---------|-------|-----------------|-------------|
 | Selection Type | Path-based | Path-based | Condition-based |
 | Regex Patterns | Yes | Yes | No |
 | Capture Groups | Yes | Yes | No |
@@ -203,7 +203,7 @@ All selection mechanisms share common evaluation primitives:

 1. **Condition evaluation**: `evaluate_condition()` handles all operators
    - Comparison: `==`, `!=`, `<`, `<=`, `>`, `>=`
    - String/collection: `contains`, `not_contains`, `in`, `not_in`
-   - Existence: `any_value`, `no_value`
+   - Existence: `exists`, `not_exists`

 2. **Condition combining**: `evaluate_conditions()` applies `"and"`/`"or"` logic

@@ -224,9 +224,9 @@ The DSL uses context-aware defaults to optimize for common use cases:

 | Context | Selection Type | Active Only | Match Logic | Rationale |
 |---------|---------------|-------------|-------------|-----------|
-| Adjacency | Path-based | False | "or" | Create links to all nodes, including disabled |
+| Links | Path-based | False | "or" | Create links to all nodes, including disabled |
 | Demands | Path-based | True | "or" | Only route traffic through active nodes |
-| Node Overrides | Path-based | False | "or" | Modify all matching nodes |
+| Node Rules | Path-based | False | "or" | Modify all matching nodes |
 | Workflow Steps | Path-based | True | "or" | Analyze only active topology |
 | Membership Rules | Condition-based | N/A | "and" | Precise matching for risk assignment |
 | Failure Rules | Condition-based | N/A | "or" | Inclusive matching for failure scenarios |
@@ -242,9 +242,10 @@ blueprints:          # Reusable network templates
 components:          # Hardware component library
 risk_groups:         # Failure correlation groups
 vars:                # YAML anchors and variables for reuse
-traffic_matrix_set:  # Traffic demand definitions
-failure_policy_set:  # Failure simulation policies
+demands:             # Traffic demand definitions
+failures:            # Failure simulation policies
 workflow:            # Analysis execution steps
+seed:                # Master seed for reproducibility (integer)
 ```

 ## `network` - Core Foundation
@@ -286,124 +287,124 @@ network:
   links:
     - source: SEA
       target: SFO
-      link_params:
-        capacity: 200
-        cost: 6846
-        attrs:
-          distance_km: 1369.13
-          media_type: "fiber"
-          hardware:
-            source: {component: "800G-ZR+", count: 1}
-            target: {component: "1600G-2xDR4", count: 1}
+      capacity: 200
+      cost: 6846
+      attrs:
+        distance_km: 1369.13
+        media_type: "fiber"
+        hardware:
+          source: {component: "800G-ZR+", count: 1}
+          target: {component: "1600G-2xDR4", count: 1}
 ```

 Recognized keys for each link entry:

 - `source`, `target`: node names (required)
-- `link_params`: mapping with only these keys allowed: `capacity`, `cost`, `disabled`, `risk_groups`, `attrs`
-- `link_count`: integer number of parallel links to create (optional; default 1)
+- `capacity`: link capacity (optional; default 1.0)
+- `cost`: link cost (optional; default 1.0)
+- `disabled`: boolean (optional)
+- `risk_groups`: list of risk-group names (optional)
+- `attrs`: mapping of attributes (optional)
+- `count`: integer number of parallel links to create (optional; default 1)

-### Group-Based Definitions
+### Node Groups

-**Node Groups:**
+**Node Groups with Count/Template:**

 ```yaml
 network:
-  groups:
+  nodes:
     leaf:
-      node_count: 4
-      name_template: "leaf-{node_num}"
+      count: 4
+      template: "leaf-{n}"
       attrs:
         role: "leaf"
     spine:
-      node_count: 2
-      name_template: "spine-{node_num}"
+      count: 2
+      template: "spine-{n}"
       attrs:
         role: "spine"
 ```

-**Adjacency Rules:**
+Creates: `leaf/leaf-1`, `leaf/leaf-2`, `leaf/leaf-3`, `leaf/leaf-4`, `spine/spine-1`, `spine/spine-2`
+
+**Link Definitions:**

 ```yaml
 network:
-  adjacency:
+  links:
     - source: /leaf
       target: /spine
       pattern: "mesh"  # Connect every leaf to every spine
-      link_params:
-        capacity: 3200
-        cost: 1
-        # Only the following keys are allowed inside link_params:
-        # capacity, cost, disabled, risk_groups, attrs
+      capacity: 3200
+      cost: 1
     - source: /spine
       target: /spine
       pattern: "one_to_one"  # Connect spines pairwise
-      link_count: 2  # Create 2 parallel links per adjacency (optional)
-      link_params:
-        capacity: 1600
-        cost: 1
-        attrs:
-          hardware:
-            source: {component: "800G-DR4", count: 2}
-            target: {component: "800G-DR4", count: 2}
+      count: 2  # Create 2 parallel links per pair (optional)
+      capacity: 1600
+      cost: 1
+      attrs:
+        hardware:
+          source: {component: "800G-DR4", count: 2}
+          target: {component: "800G-DR4", count: 2}
 ```

-### Attribute-filtered Adjacency (selector objects)
+### Attribute-filtered Links (selector objects)

 You can filter the source or target node sets by attributes using the same condition syntax as failure policies. Replace a string `source`/`target` with an object that has `path` and optional `match`:

 ```yaml
 network:
-  adjacency:
+  links:
     - source:
         path: "/leaf"
         match:
           logic: "and"  # default: "or"
           conditions:
             - attr: "role"
-              operator: "=="
+              op: "=="
               value: "leaf"
       target:
         path: "/spine"
         match:
           conditions:
             - attr: "role"
-              operator: "=="
+              op: "=="
              value: "spine"
       pattern: "mesh"
-      link_params:
-        capacity: 100
-        cost: 1
+      capacity: 100
+      cost: 1
 ```

 Notes:

 - `path` is a regex pattern matched against node names (anchored at start via Python `re.match`).
-- `match.conditions` uses the shared condition operators: `==`, `!=`, `<`, `<=`, `>`, `>=`, `contains`, `not_contains`, `any_value`, `no_value`.
+- `match.conditions` uses the shared condition operators: `==`, `!=`, `<`, `<=`, `>`, `>=`, `contains`, `not_contains`, `exists`, `not_exists`.
 - Conditions evaluate over a flat view of node attributes combining top-level fields (`name`, `disabled`, `risk_groups`) and `node.attrs`.
 - `logic` in the `match` block accepts "and" or "or" (default "or").
-- Selectors filter node candidates before the adjacency `pattern` is applied.
+- Selectors filter node candidates before the link `pattern` is applied.
 - Cross-endpoint predicates (e.g., comparing a source attribute to a target attribute) are not supported.
-- Node overrides run before adjacency expansion; link overrides run after adjacency expansion.
+- Node rules run before link expansion; link rules run after link creation.

 Path semantics:

 - All paths are relative to the current scope. There is no concept of absolute paths.
 - Leading `/` is stripped and has no functional effect - `/leaf` and `leaf` are equivalent.
 - Within a blueprint, paths resolve relative to the instantiation path. For example, if a blueprint is used under group `pod1`, then `source: /leaf` resolves to `pod1/leaf`.
-- At top-level `network.adjacency`, the parent path is empty, so patterns match against full node names.
+- At top-level `network.links`, the parent path is empty, so patterns match against full node names.
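A quick check of those anchoring rules with plain `re.match` (illustrative; the DSL applies the same semantics internally):

```python
import re

def path_matches(pattern: str, node_name: str) -> bool:
    # Leading "/" is stripped; re.match anchors at the start of the name.
    return re.match(pattern.lstrip("/"), node_name) is not None

assert path_matches("/leaf", "leaf/leaf-1")          # "/leaf" and "leaf" are equivalent
assert path_matches("leaf", "leaf/leaf-1")
assert not path_matches("leaf", "pod1/leaf/leaf-1")  # anchored, not a substring search
```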
Example with OR logic to match multiple roles: ```yaml network: - adjacency: + links: - source: path: "/metro1/dc[1-1]" match: conditions: - attr: "role" - operator: "==" + op: "==" value: "dc" target: path: "/metro1/pop[1-2]" @@ -411,10 +412,10 @@ network: logic: "or" conditions: - attr: "role" - operator: "==" + op: "==" value: "leaf" - attr: "role" - operator: "==" + op: "==" value: "core" pattern: "mesh" ``` @@ -422,32 +423,32 @@ network: **Connectivity Patterns:** - `mesh`: Full connectivity between all source and target nodes -- `one_to_one`: Pairwise connections. Compatible sizes means max(|S|,|T|) must be an integer multiple of min(|S|,|T|); mapping wraps modulo the smaller set (e.g., 4×2 and 6×3 valid; 3×2 invalid). +- `one_to_one`: Pairwise connections. Compatible sizes means max(|S|,|T|) must be an integer multiple of min(|S|,|T|); mapping wraps modulo the smaller set (e.g., 4x2 and 6x3 valid; 3x2 invalid). ### Bracket Expansion -Create multiple similar groups using bracket notation: +Create multiple similar node groups using bracket notation: ```yaml network: - groups: + nodes: dc[1-3]/rack[a,b]: # Creates dc1/racka, dc1/rackb, dc2/racka, etc. - node_count: 4 - name_template: "srv-{node_num}" + count: 4 + template: "srv-{n}" ``` **Expansion Types:** -- Numeric ranges: `[1-4]` → 1, 2, 3, 4 -- Explicit lists: `[red,blue,green]` → red, blue, green +- Numeric ranges: `[1-4]` -> 1, 2, 3, 4 +- Explicit lists: `[red,blue,green]` -> red, blue, green **Scope:** Bracket expansion applies to: -- **Group names** under `network.groups` and `blueprints.*.groups` +- **Node group names** under `network.nodes` and `blueprints.*.nodes` - **Risk group names** in top-level `risk_groups` definitions (including children) -- **Risk group membership arrays** on nodes, links, and groups +- **Risk group membership arrays** on nodes, links, and node groups -Component names, direct node names (`network.nodes`), and other string fields treat brackets as literal characters. +Component names, direct node names (`network.nodes` without count/template), and other string fields treat brackets as literal characters. **Risk Group Expansion Examples:** @@ -473,26 +474,28 @@ network: The range syntax `[start-end]` only supports integers. For letters, mixed sequences, or zero-padded numbers, use comma-separated explicit lists. 
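A minimal sketch of the bracket-expansion semantics (integer ranges and explicit lists); the real parser lives in the DSL layer and handles more edge cases:

```python
import re
from itertools import product

def expand_brackets(name: str) -> list[str]:
    segments = []
    for token in re.split(r"\[([^\]]+)\]", name):
        if re.fullmatch(r"\d+-\d+", token):   # numeric range, e.g. [1-3]
            lo, hi = map(int, token.split("-"))
            segments.append([str(v) for v in range(lo, hi + 1)])
        elif "," in token:                    # explicit list, e.g. [a,b]
            segments.append(token.split(","))
        else:                                 # literal text between brackets
            segments.append([token])
    return ["".join(combo) for combo in product(*segments)]

print(expand_brackets("dc[1-3]/rack[a,b]"))
# ['dc1/racka', 'dc1/rackb', 'dc2/racka', ..., 'dc3/rackb']
```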
-### Variable Expansion in Adjacency +### Variable Expansion in Links -Use `$var` or `${var}` syntax for template substitution: +Use `$var` or `${var}` syntax with an `expand` block for template substitution: ```yaml -adjacency: +links: - source: "plane${p}/rack${r}" target: "spine${s}" - expand_vars: - p: [1, 2] - r: ["a", "b"] - s: [1, 2, 3] - expansion_mode: "cartesian" # All combinations + expand: + vars: + p: [1, 2] + r: ["a", "b"] + s: [1, 2, 3] + mode: "cartesian" # All combinations pattern: "mesh" - source: "server${idx}" target: "switch${idx}" - expand_vars: - idx: [1, 2, 3, 4] - expansion_mode: "zip" # Paired by index + expand: + vars: + idx: [1, 2, 3, 4] + mode: "zip" # Paired by index pattern: "one_to_one" ``` @@ -503,45 +506,44 @@ Templates for network segments that can be instantiated multiple times: ```yaml blueprints: leaf_spine: - groups: + nodes: leaf: - node_count: 4 - name_template: "leaf-{node_num}" + count: 4 + template: "leaf-{n}" spine: - node_count: 2 - name_template: "spine-{node_num}" - adjacency: + count: 2 + template: "spine-{n}" + links: - source: /leaf target: /spine pattern: mesh - link_params: - capacity: 40 - cost: 1 + capacity: 40 + cost: 1 network: - groups: + nodes: pod1: - use_blueprint: leaf_spine + blueprint: leaf_spine pod2: - use_blueprint: leaf_spine - parameters: # Override blueprint parameters - leaf.node_count: 6 - spine.name_template: "core-{node_num}" + blueprint: leaf_spine + params: # Override blueprint parameters + leaf.count: 6 + spine.template: "core-{n}" ``` **Blueprint Features:** -- Define groups and adjacency rules once, reuse multiple times +- Define nodes and link rules once, reuse multiple times - Override parameters using dot notation during instantiation - Hierarchical naming: `pod1/leaf/leaf-1`, `pod2/spine/core-1` -## Node and Link Overrides +## Node and Link Rules Modify specific nodes or links after initial creation: ```yaml network: - node_overrides: + node_rules: - path: "^pod1/spine/.*$" # Regex pattern matching disabled: true attrs: @@ -550,25 +552,23 @@ network: attrs: priority: "high" - link_overrides: + link_rules: - source: "^pod1/leaf/.*$" target: "^pod1/spine/.*$" - link_params: - capacity: 100 # Override capacity + capacity: 100 # Override capacity - source: ".*/spine/.*" target: ".*/spine/.*" - any_direction: true # Bidirectional matching - link_params: - cost: 5 - attrs: - link_type: "backbone" + bidirectional: true # Bidirectional matching (default: true) + cost: 5 + attrs: + link_type: "backbone" +``` Notes: -- For `link_overrides`, only the keys `source`, `target`, `link_params`, and optional `any_direction` are allowed at the top level. All parameter changes must be nested under `link_params`. -- `any_direction` defaults to `true` if omitted. -- Ordering: `node_overrides` run after node creation (groups and direct nodes) and before any adjacency expansion; `link_overrides` run after adjacency and direct links. -``` +- For `link_rules`, link properties (`capacity`, `cost`, `disabled`, `risk_groups`, `attrs`) are specified at the top level (no wrapper). +- `bidirectional` defaults to `true` if omitted. +- Ordering: `node_rules` run after node creation (groups and direct nodes) and before any link expansion; `link_rules` run after links are created. 
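A sketch of what `bidirectional` matching means for a link rule, assuming (as the default implies) that either endpoint order can satisfy the source/target patterns:

```python
import re

def rule_matches(rule_src: str, rule_tgt: str,
                 link_src: str, link_tgt: str, bidirectional: bool = True) -> bool:
    def hit(pattern: str, name: str) -> bool:
        return re.match(pattern, name) is not None

    forward = hit(rule_src, link_src) and hit(rule_tgt, link_tgt)
    reverse = bidirectional and hit(rule_src, link_tgt) and hit(rule_tgt, link_src)
    return forward or reverse

# A spine->leaf link still matches a leaf->spine rule when bidirectional=True:
assert rule_matches(r"^pod1/leaf/.*$", r"^pod1/spine/.*$",
                    "pod1/spine/spine-1", "pod1/leaf/leaf-2")
```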
## `components` - Hardware Library @@ -620,11 +620,10 @@ network: links: - source: spine-1 target: leaf-1 - link_params: - attrs: - hardware: - source: {component: "Optic400G", count: 4} - target: {component: "Optic400G", count: 4} + attrs: + hardware: + source: {component: "Optic400G", count: 4} + target: {component: "Optic400G", count: 4} ``` ## `risk_groups` - Risk Modeling @@ -683,7 +682,7 @@ Risk groups can model any failure correlation pattern. Below are some common exa ### Example 1: Physical Infrastructure (Fiber Links) -One common use case is modeling physical infrastructure. For fiber links, you might use a hierarchy like Path → Conduit → Fiber Pair. +One common use case is modeling physical infrastructure. For fiber links, you might use a hierarchy like Path -> Conduit -> Fiber Pair. ```yaml risk_groups: @@ -707,12 +706,11 @@ network: links: - source: NYC target: CHI - link_params: - risk_groups: ["Conduit_NYC_CHI_C1"] - attrs: - fiber: - path_id: "NYC-CHI" - conduit_id: "NYC-CHI-C1" + risk_groups: ["Conduit_NYC_CHI_C1"] + attrs: + fiber: + path_id: "NYC-CHI" + conduit_id: "NYC-CHI-C1" ``` **Cascading behavior:** @@ -723,7 +721,7 @@ network: ### Example 2: Physical Infrastructure (Data Center Nodes) -For data center nodes, you might model facility infrastructure with a hierarchy like Building → Room → Power Zone. +For data center nodes, you might model facility infrastructure with a hierarchy like Building -> Room -> Power Zone. ```yaml risk_groups: @@ -787,26 +785,26 @@ Dynamically assign entities to risk groups based on attributes: risk_groups: - name: Conduit_NYC_CHI_C1 membership: - entity_scope: link + scope: link match: logic: and # "and" or "or" (default: "and") conditions: - attr: fiber.conduit_id - operator: "==" + op: "==" value: "NYC-CHI-C1" - name: PowerZone_DC1_R1_PZA membership: - entity_scope: node + scope: node match: logic: and conditions: - attr: facility.power_zone - operator: "==" + op: "==" value: "DC1-R1-PZ-A" ``` -**Note:** Membership rules default to `logic: "and"` (stricter than adjacency/demand selectors which default to `"or"`). This ensures precise entity matching for failure correlation. +**Note:** Membership rules default to `logic: "and"` (stricter than link/demand selectors which default to `"or"`). This ensures precise entity matching for failure correlation. 
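An illustrative evaluator for these membership conditions; the shipped `evaluate_condition()`/`evaluate_conditions()` support the full operator set:

```python
def get_attr(entity: dict, dotted: str):
    # Dot-notation walk, e.g. "fiber.conduit_id" -> entity["fiber"]["conduit_id"].
    value = entity
    for key in dotted.split("."):
        value = value.get(key) if isinstance(value, dict) else None
    return value

def matches(entity: dict, conditions: list[dict], logic: str = "and") -> bool:
    results = [get_attr(entity, c["attr"]) == c["value"]
               for c in conditions if c["op"] == "=="]  # "==" only, for brevity
    return all(results) if logic == "and" else any(results)

link = {"fiber": {"conduit_id": "NYC-CHI-C1"}}
assert matches(link, [{"attr": "fiber.conduit_id", "op": "==", "value": "NYC-CHI-C1"}])
```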
### Generated Risk Groups @@ -816,17 +814,17 @@ Automatically create risk groups from entity attributes: risk_groups: # Generate risk groups from fiber path attributes on links - generate: - entity_scope: link + scope: link group_by: fiber.path_id - name_template: Path_${value} + name: Path_${value} attrs: type: fiber_path # Generate risk groups from facility attributes on nodes - generate: - entity_scope: node + scope: node group_by: facility.building_id - name_template: Building_${value} + name: Building_${value} attrs: type: building ``` @@ -895,27 +893,27 @@ network: - Anchors can be defined in any section, not just `vars` - Merge operations follow YAML 1.1 semantics (later keys override earlier ones) -## `traffic_matrix_set` - Traffic Analysis +## `demands` - Traffic Analysis Define traffic demand patterns for capacity analysis: ```yaml -traffic_matrix_set: +demands: production: # Simple string pattern selectors - source: "^servers/.*" - sink: "^storage/.*" - demand: 1000 + target: "^storage/.*" + volume: 1000 mode: "combine" priority: 1 - flow_policy_config: "SHORTEST_PATHS_ECMP" + flow_policy: "SHORTEST_PATHS_ECMP" # Dict selectors with attribute-based grouping - source: group_by: "dc" # Group nodes by datacenter attribute - sink: + target: group_by: "dc" - demand: 500 + volume: 500 mode: "pairwise" priority: 2 @@ -925,42 +923,44 @@ traffic_matrix_set: match: conditions: - attr: "role" - operator: "==" + op: "==" value: "leaf" - sink: + target: path: "^dc2/.*" match: conditions: - attr: "role" - operator: "==" + op: "==" value: "spine" - demand: 200 + volume: 200 mode: "combine" ``` ### Variable Expansion in Demands -Use `expand_vars` to generate multiple demands from a template: +Use an `expand` block to generate multiple demands from a template: ```yaml -traffic_matrix_set: +demands: inter_dc: - source: "^${src_dc}/.*" - sink: "^${dst_dc}/.*" - demand: 100 + target: "^${dst_dc}/.*" + volume: 100 mode: "combine" - expand_vars: - src_dc: ["dc1", "dc2"] - dst_dc: ["dc2", "dc3"] - expansion_mode: "cartesian" # All combinations (default) + expand: + vars: + src_dc: ["dc1", "dc2"] + dst_dc: ["dc2", "dc3"] + mode: "cartesian" # All combinations (default) - source: "^${dc}/leaf/.*" - sink: "^${dc}/spine/.*" - demand: 50 + target: "^${dc}/spine/.*" + volume: 50 mode: "pairwise" - expand_vars: - dc: ["dc1", "dc2", "dc3"] - expansion_mode: "zip" # Paired by index + expand: + vars: + dc: ["dc1", "dc2", "dc3"] + mode: "zip" # Paired by index ``` **Expansion Modes:** @@ -970,15 +970,15 @@ traffic_matrix_set: ### Selector Fields -The `source` and `sink` fields accept either: +The `source` and `target` fields accept either: - A string regex pattern matched against node names - A selector object with `path`, `group_by`, and/or `match` fields ### Traffic Modes -- `combine`: Single aggregate flow between source and sink groups -- `pairwise`: Individual flows between all source-sink node pairs +- `combine`: Single aggregate flow between source and target groups +- `pairwise`: Individual flows between all source-target node pairs ### Flow Policies @@ -990,71 +990,71 @@ The `source` and `sink` fields accept either: See [Flow Policy Presets](design.md#flow-policy-presets) for detailed configuration mapping and real-world network behavior. 
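The two `expand` modes above are easy to model in plain Python (illustrative; the demand builder does the equivalent during YAML parsing):

```python
from itertools import product

vars_ = {"src_dc": ["dc1", "dc2"], "dst_dc": ["dc2", "dc3"]}

# cartesian: every combination -> 4 (source, target) demand specs
cartesian = list(product(vars_["src_dc"], vars_["dst_dc"]))
# [('dc1', 'dc2'), ('dc1', 'dc3'), ('dc2', 'dc2'), ('dc2', 'dc3')]

# zip: paired by position -> 2 demand specs; list lengths must match
zipped = list(zip(vars_["src_dc"], vars_["dst_dc"], strict=True))
# [('dc1', 'dc2'), ('dc2', 'dc3')]
```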
-## `failure_policy_set` - Failure Simulation +## `failures` - Failure Simulation Define failure policies for resilience testing: ```yaml -failure_policy_set: +failures: single_link_failure: modes: # Weighted modes; exactly one mode fires per iteration - weight: 1.0 rules: - - entity_scope: "link" - rule_type: "choice" + - scope: "link" + mode: "choice" count: 1 weighted_modes: # Example of weighted multi-mode policy modes: - weight: 0.30 rules: - - entity_scope: "risk_group" - rule_type: "choice" + - scope: "risk_group" + mode: "choice" count: 1 weight_by: distance_km - weight: 0.35 rules: - - entity_scope: "link" - rule_type: "choice" + - scope: "link" + mode: "choice" count: 3 conditions: - attr: link_type - operator: "==" + op: "==" value: dc_to_pop logic: and weight_by: target_capacity - weight: 0.25 rules: - - entity_scope: "node" - rule_type: "choice" + - scope: "node" + mode: "choice" count: 1 conditions: - attr: node_type - operator: "!=" + op: "!=" value: dc_region logic: and weight_by: attached_capacity_gbps - weight: 0.10 rules: - - entity_scope: "link" - rule_type: "choice" + - scope: "link" + mode: "choice" count: 4 conditions: - attr: link_type - operator: "==" + op: "==" value: leaf_spine - attr: link_type - operator: "==" + op: "==" value: intra_group - attr: link_type - operator: "==" + op: "==" value: inter_group - attr: link_type - operator: "==" + op: "==" value: internal_mesh logic: or ``` -**Rule Types:** +**Selection Modes:** - `all`: Select all matching entities - `choice`: Select specific count of entities @@ -1063,8 +1063,8 @@ failure_policy_set: Notes: - Policies are mode-based. Each mode has a non-negative `weight`. One mode is chosen per iteration with probability proportional to weights, then all rules in that mode are applied and their selections are unioned. -- Each rule has `entity_scope` ("node" | "link" | "risk_group"), optional `logic` ("and" | "or"; defaults to "or"), optional `conditions`, and one of `rule_type` parameters (`count` for choice, `probability` for random). `weight_by` can be provided for weighted sampling in `choice` rules. -- Condition language is the same as used in adjacency `match` selectors (see below) and supports: `==`, `!=`, `<`, `<=`, `>`, `>=`, `contains`, `not_contains`, `any_value`, `no_value`. Conditions evaluate on a flat attribute mapping that includes top-level fields and `attrs`. +- Each rule has `scope` ("node" | "link" | "risk_group"), optional `logic` ("and" | "or"; defaults to "or"), optional `conditions`, and one of the selection mode parameters (`count` for choice, `probability` for random). `weight_by` can be provided for weighted sampling in `choice` rules. +- Condition language is the same as used in link `match` selectors and supports: `==`, `!=`, `<`, `<=`, `>`, `>=`, `contains`, `not_contains`, `exists`, `not_exists`. Conditions evaluate on a flat attribute mapping that includes top-level fields and `attrs`. 
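The mode-selection semantics are easy to model: one mode fires per iteration with probability proportional to its weight, then the selections of all its rules are unioned. A seeded sketch with a stand-in `apply_rule` (the real evaluation is condition-based):

```python
import random

def apply_rule(rule: dict) -> set[str]:
    # Stand-in for real rule evaluation (all / choice / random over entities).
    return {f"{rule['scope']}:selected"}

def draw_failures(modes: list[dict], rng: random.Random) -> set[str]:
    weights = [m["weight"] for m in modes]
    chosen = rng.choices(modes, weights=weights, k=1)[0]
    failed: set[str] = set()
    for rule in chosen["rules"]:
        failed |= apply_rule(rule)  # union across the chosen mode's rules
    return failed

rng = random.Random(42)  # a scenario seed keeps iterations reproducible
modes = [
    {"weight": 0.7, "rules": [{"scope": "link", "mode": "choice", "count": 1}]},
    {"weight": 0.3, "rules": [{"scope": "node", "mode": "choice", "count": 1}]},
]
print(draw_failures(modes, rng))
```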
## `workflow` - Execution Steps @@ -1072,14 +1072,14 @@ Define analysis workflow steps: ```yaml workflow: - - step_type: NetworkStats + - type: NetworkStats name: network_statistics - - step_type: MaximumSupportedDemand + - type: MaximumSupportedDemand name: msd_baseline - matrix_name: baseline_traffic_matrix - - step_type: TrafficMatrixPlacement + demand_set: baseline_traffic_matrix + - type: TrafficMatrixPlacement name: tm_placement - matrix_name: baseline_traffic_matrix + demand_set: baseline_traffic_matrix failure_policy: weighted_modes iterations: 1000 ``` @@ -1089,14 +1089,14 @@ workflow: - `BuildGraph`: Export graph to JSON (node-link) for external analysis - `NetworkStats`: Compute basic statistics - `MaxFlow`: Monte Carlo capacity analysis between node groups -- `TrafficMatrixPlacement`: Monte Carlo demand placement for a named matrix -- `MaximumSupportedDemand`: Search for `alpha_star` for a named matrix +- `TrafficMatrixPlacement`: Monte Carlo demand placement for a named demand set +- `MaximumSupportedDemand`: Search for `alpha_star` for a named demand set See [Workflow Reference](workflow.md) for detailed configuration. ## Node Selection -NetGraph provides a unified selector system for selecting and grouping nodes across adjacency, demands, and workflow steps. +NetGraph provides a unified selector system for selecting and grouping nodes across links, demands, and workflow steps. ### Selector Forms @@ -1176,31 +1176,31 @@ source: logic: "and" # "and" or "or" (default: "or") conditions: - attr: "role" - operator: "==" + op: "==" value: "leaf" - attr: "tier" - operator: ">=" + op: ">=" value: 2 ``` -**Supported operators:** `==`, `!=`, `<`, `<=`, `>`, `>=`, `contains`, `not_contains`, `in`, `not_in`, `any_value`, `no_value` +**Supported operators:** `==`, `!=`, `<`, `<=`, `>`, `>=`, `contains`, `not_contains`, `in`, `not_in`, `exists`, `not_exists` ### Workflow Examples ```yaml workflow: - - step_type: MaxFlow + - type: MaxFlow source: group_by: "metro" # Group by metro attribute - sink: "^metro2/.*" # String pattern + target: "^metro2/.*" # String pattern mode: "pairwise" ``` -### Adjacency Examples +### Link Examples ```yaml network: - adjacency: + links: - source: group_by: "role" target: diff --git a/docs/reference/schemas.md b/docs/reference/schemas.md index b01b370..5885f53 100644 --- a/docs/reference/schemas.md +++ b/docs/reference/schemas.md @@ -89,10 +89,12 @@ jsonschema.validate(data, schema) - `network` - Network topology definition - `blueprints` - Reusable network templates - `risk_groups` - Risk group definitions -- `failure_policy_set` - Named failure policies -- `traffic_matrix_set` - Named traffic matrices +- `failures` - Named failure policies +- `demands` - Named demand sets - `workflow` - Workflow step definitions - `components` - Hardware component library +- `vars` - YAML anchors and variables for reuse +- `seed` - Master random seed for reproducibility ## Schema Maintenance diff --git a/docs/reference/workflow.md b/docs/reference/workflow.md index a00f46a..fa97f9e 100644 --- a/docs/reference/workflow.md +++ b/docs/reference/workflow.md @@ -16,14 +16,14 @@ Workflows are ordered steps executed on a scenario. 
Each step computes a result ```yaml workflow: - - step_type: NetworkStats + - type: NetworkStats name: network_statistics - - step_type: MaximumSupportedDemand + - type: MaximumSupportedDemand name: msd_baseline - matrix_name: baseline_traffic_matrix - - step_type: TrafficMatrixPlacement + demand_set: baseline_traffic_matrix + - type: TrafficMatrixPlacement name: tm_placement - matrix_name: baseline_traffic_matrix + demand_set: baseline_traffic_matrix failure_policy: random_failures iterations: 1000 ``` @@ -41,7 +41,7 @@ workflow: Validates network topology and exports node-link JSON for external analysis. Optional for other workflow steps. ```yaml -- step_type: BuildGraph +- type: BuildGraph name: build_graph add_reverse: true # Add reverse edges for bidirectional connectivity (default: true) ``` @@ -55,7 +55,7 @@ Parameters: Compute node, link, and degree metrics. Supports temporary exclusions without modifying the base network. ```yaml -- step_type: NetworkStats +- type: NetworkStats name: baseline_stats include_disabled: false # Include disabled nodes/links in stats excluded_nodes: [] # Optional: Temporary node exclusions @@ -73,10 +73,10 @@ Parameters: Monte Carlo maximum flow analysis between node groups. Baseline (no failures) is always run first as a separate reference. ```yaml -- step_type: MaxFlow +- type: MaxFlow name: capacity_analysis source: "^servers/.*" - sink: "^storage/.*" + target: "^storage/.*" mode: "combine" # combine | pairwise failure_policy: random_failures iterations: 1000 # Number of failure iterations @@ -91,18 +91,18 @@ Monte Carlo maximum flow analysis between node groups. Baseline (no failures) is ### TrafficMatrixPlacement -Monte Carlo placement of a named traffic matrix with optional alpha scaling. Baseline (no failures) is always run first as a separate reference. +Monte Carlo placement of a named demand set with optional alpha scaling. Baseline (no failures) is always run first as a separate reference. ```yaml -- step_type: TrafficMatrixPlacement +- type: TrafficMatrixPlacement name: tm_placement - matrix_name: default - failure_policy: random_failures # Optional: policy name in failure_policy_set - iterations: 100 # Number of failure iterations + demand_set: default + failure_policy: random_failures # Optional: policy name in failures section + iterations: 100 # Number of failure iterations parallelism: auto - placement_rounds: auto # or an integer - include_flow_details: true # cost_distribution per flow - include_used_edges: false # include per-demand used edge lists + placement_rounds: auto # or an integer + include_flow_details: true # cost_distribution per flow + include_used_edges: false # include per-demand used edge lists store_failure_patterns: false # Alpha scaling – explicit or from another step alpha: 1.0 @@ -114,7 +114,7 @@ Outputs: - metadata: iterations, parallelism, analysis_function, policy_name, execution_time, unique_patterns -- data.context: matrix_name, placement_rounds, include_flow_details, +- data.context: demand_set, placement_rounds, include_flow_details, include_used_edges, base_demands, alpha, alpha_source ### MaximumSupportedDemand @@ -122,9 +122,9 @@ Outputs: Search for the maximum uniform traffic multiplier `alpha_star` that is fully placeable. 
```yaml -- step_type: MaximumSupportedDemand +- type: MaximumSupportedDemand name: msd_default - matrix_name: default + demand_set: default acceptance_rule: hard # Currently only "hard" is supported alpha_start: 1.0 # Starting alpha value for search growth_factor: 2.0 # Growth factor for bracketing (must be > 1.0) @@ -139,7 +139,7 @@ Search for the maximum uniform traffic multiplier `alpha_star` that is fully pla Parameters: -- `matrix_name`: Name of the traffic matrix to analyze (default: "default"). +- `demand_set`: Name of the demand set to analyze (default: "default"). - `acceptance_rule`: Acceptance rule for feasibility (currently only "hard" is supported). - `alpha_start`: Initial alpha value to probe. - `growth_factor`: Multiplier for bracketing phase (must be > 1.0). @@ -163,7 +163,7 @@ Outputs: Aggregate platform and optics capex/power by hierarchy level (split by `/`). ```yaml -- step_type: CostPower +- type: CostPower name: cost_power include_disabled: false aggregation_level: 2 @@ -210,7 +210,7 @@ source: match: conditions: - attr: "tier" - operator: "==" + op: "==" value: "leaf" ``` @@ -248,23 +248,23 @@ source: ### Flow Analysis Modes -**`combine` Mode**: Aggregates all source matches into one virtual source, all sink matches into one virtual sink. Produces single flow value. +**`combine` Mode**: Aggregates all source matches into one virtual source, all target matches into one virtual target. Produces single flow value. -**`pairwise` Mode**: Computes flow between each source group and sink group pair. Produces flow matrix keyed by `(source_group, sink_group)`. +**`pairwise` Mode**: Computes flow between each source group and target group pair. Produces flow matrix keyed by `(source_group, target_group)`. ## MaxFlow Parameters ### Required Parameters - `source`: Node selector for source nodes (string pattern or selector object) -- `sink`: Node selector for sink nodes (string pattern or selector object) +- `target`: Node selector for target nodes (string pattern or selector object) ### Analysis Configuration ```yaml mode: combine # combine | pairwise (default: combine) iterations: 1000 # Failure iterations to run (default: 1) -failure_policy: policy_name # Name in failure_policy_set (default: null) +failure_policy: policy_name # Name in failures section (default: null) parallelism: auto # Worker processes (default: auto) shortest_path: false # Restrict to shortest paths (default: false) require_capacity: true # Path selection considers capacity (default: true) @@ -285,7 +285,7 @@ Exported results have a fixed top-level structure. Keys under `workflow` and `st { "workflow": { "network_statistics": { - "step_type": "NetworkStats", + "type": "NetworkStats", "step_name": "network_statistics", "execution_order": 0, "scenario_seed": 42, @@ -303,7 +303,7 @@ Exported results have a fixed top-level structure. Keys under `workflow` and `st "metadata": { "duration_sec": 1.234 }, "data": { "alpha_star": 1.37, - "context": { "matrix_name": "baseline_traffic_matrix" } + "context": { "demand_set": "baseline_traffic_matrix" } } }, "tm_placement": { @@ -318,11 +318,11 @@ Exported results have a fixed top-level structure. 
Keys under `workflow` and `st "data": {} } ], - "context": { "matrix_name": "baseline_traffic_matrix" } + "context": { "demand_set": "baseline_traffic_matrix" } } } }, - "scenario": { "seed": 42, "failure_policy_set": { }, "traffic_matrices": { } } + "scenario": { "seed": 42, "failures": { }, "demands": { } } } ``` diff --git a/ngraph/__init__.py b/ngraph/__init__.py index 838f068..b534063 100644 --- a/ngraph/__init__.py +++ b/ngraph/__init__.py @@ -36,7 +36,6 @@ from ngraph.analysis.failure_manager import FailureManager from ngraph.explorer import NetworkExplorer from ngraph.lib.nx import EdgeMap, NodeMap, from_networkx, to_networkx -from ngraph.model.demand.matrix import TrafficMatrixSet from ngraph.model.demand.spec import TrafficDemand from ngraph.model.flow.policy_config import FlowPolicyPreset from ngraph.model.network import Link, Network, Node, RiskGroup @@ -56,7 +55,6 @@ "Link", "RiskGroup", "Path", - "TrafficMatrixSet", "TrafficDemand", "FlowPolicyPreset", "Scenario", diff --git a/ngraph/_version.py b/ngraph/_version.py index 6b84c81..ed772b5 100644 --- a/ngraph/_version.py +++ b/ngraph/_version.py @@ -2,4 +2,4 @@ __all__ = ["__version__"] -__version__ = "0.16.0" +__version__ = "0.17.0" diff --git a/ngraph/analysis/context.py b/ngraph/analysis/context.py index a3739cd..490eea0 100644 --- a/ngraph/analysis/context.py +++ b/ngraph/analysis/context.py @@ -73,10 +73,7 @@ def _get_active_node_names( nodes: List[Any], excluded_nodes: Optional[Set[str]] = None, ) -> List[str]: - """Extract names of active (non-disabled) nodes, optionally excluding some. - - This is a local helper to replace utils.nodes.get_active_node_names. - """ + """Extract names of active (non-disabled) nodes, optionally excluding some.""" if excluded_nodes: return [ n.name for n in nodes if not n.disabled and n.name not in excluded_nodes @@ -1628,7 +1625,7 @@ def _construct_max_flow_result( if core_summary is not None and len(core_summary.costs) > 0: cost_dist = { float(c): float(f) - for c, f in zip(core_summary.costs, core_summary.flows, strict=False) + for c, f in zip(core_summary.costs, core_summary.flows, strict=True) } return MaxFlowResult( total_flow=flow_value, diff --git a/ngraph/analysis/demand.py b/ngraph/analysis/demand.py index 3bf869d..bf0f841 100644 --- a/ngraph/analysis/demand.py +++ b/ngraph/analysis/demand.py @@ -7,10 +7,9 @@ from __future__ import annotations from dataclasses import dataclass, replace -from typing import Any, Dict, Iterator, List +from typing import Dict, List from ngraph.analysis.context import LARGE_CAPACITY, AugmentationEdge -from ngraph.dsl.expansion import ExpansionSpec, expand_templates from ngraph.dsl.selectors import normalize_selector, select_nodes from ngraph.model.demand.spec import TrafficDemand from ngraph.model.flow.policy_config import FlowPolicyPreset @@ -89,21 +88,21 @@ def _expand_combine( for src_name in src_names: augmentations.append(AugmentationEdge(pseudo_src, src_name, LARGE_CAPACITY, 0)) - # Real sinks -> pseudo-sink (unidirectional IN) + # Real targets -> pseudo-target (unidirectional IN) for dst_name in dst_names: augmentations.append(AugmentationEdge(dst_name, pseudo_snk, LARGE_CAPACITY, 0)) # Single aggregated demand - demand = ExpandedDemand( + expanded = ExpandedDemand( src_name=pseudo_src, dst_name=pseudo_snk, - volume=td.demand, + volume=td.volume, priority=td.priority, policy_preset=policy_preset, demand_id=td.id, ) - return [demand], augmentations + return [expanded], augmentations def _expand_pairwise( @@ -125,7 +124,7 @@ def _expand_pairwise( 
return [], [] # Distribute volume evenly - volume_per_pair = td.demand / len(pairs) + volume_per_pair = td.volume / len(pairs) demands = [ ExpandedDemand( @@ -142,86 +141,6 @@ def _expand_pairwise( return demands, [] # No augmentations for pairwise -def _extract_selector_templates(selector: Any, prefix: str) -> Dict[str, str]: - """Extract string fields from a selector that may contain variables. - - Args: - selector: String path or dict selector. - prefix: Key prefix for the returned template dict. - - Returns: - Dict mapping template keys to string values that may contain $var. - """ - templates: Dict[str, str] = {} - if isinstance(selector, str): - templates[prefix] = selector - elif isinstance(selector, dict): - if "path" in selector and isinstance(selector["path"], str): - templates[f"{prefix}.path"] = selector["path"] - if "group_by" in selector and isinstance(selector["group_by"], str): - templates[f"{prefix}.group_by"] = selector["group_by"] - return templates - - -def _rebuild_selector(original: Any, substituted: Dict[str, str], prefix: str) -> Any: - """Rebuild a selector with substituted values. - - Args: - original: Original selector (string or dict). - substituted: Dict of substituted template values. - prefix: Key prefix used in substituted dict. - - Returns: - Selector with variables substituted. - """ - if isinstance(original, str): - return substituted.get(prefix, original) - - if isinstance(original, dict): - result = dict(original) - if f"{prefix}.path" in substituted: - result["path"] = substituted[f"{prefix}.path"] - if f"{prefix}.group_by" in substituted: - result["group_by"] = substituted[f"{prefix}.group_by"] - return result - - return original - - -def _expand_with_variables(td: TrafficDemand) -> Iterator[TrafficDemand]: - """Expand a TrafficDemand using its expand_vars specification. - - Yields one or more TrafficDemand instances with variables substituted. - Handles both string and dict selectors correctly. - """ - if not td.expand_vars: - yield td - return - - spec = ExpansionSpec( - expand_vars=td.expand_vars, - expansion_mode=td.expansion_mode, # type: ignore[arg-type] - ) - - # Extract string templates from selectors (handles both str and dict) - templates = _extract_selector_templates(td.source, "source") - templates.update(_extract_selector_templates(td.sink, "sink")) - - if not templates: - # No expandable string fields - yield as-is - yield td - return - - # Expand templates and rebuild selectors - for substituted in expand_templates(templates, spec): - yield replace( - td, - source=_rebuild_selector(td.source, substituted, "source"), - sink=_rebuild_selector(td.sink, substituted, "sink"), - expand_vars={}, # Clear to prevent re-expansion - ) - - def _expand_by_group_mode( td: TrafficDemand, src_groups: Dict[str, List[Node]], @@ -288,13 +207,13 @@ def _expand_by_group_mode( return [], [] # Divide volume among group pairs - volume_per_group_pair = td.demand / len(group_pairs) + volume_per_group_pair = td.volume / len(group_pairs) for src_label, dst_label in group_pairs: group_td = replace( td, id=f"{td.id}|{src_label}|{dst_label}", - demand=volume_per_group_pair, + volume=volume_per_group_pair, ) single_src = {src_label: src_groups[src_label]} single_dst = {dst_label: dst_groups[dst_label]} @@ -325,15 +244,17 @@ def expand_demands( """Expand TrafficDemand specifications into concrete demands with augmentations. Pure function that: - 1. Expands variables in selectors using expand_vars - 2. Normalizes and evaluates selectors to get node groups - 3. 
Distributes volume based on mode (combine/pairwise) and group_mode - 4. Generates augmentation edges for combine mode (pseudo nodes) - 5. Returns demands (node names) + augmentations + 1. Normalizes and evaluates selectors to get node groups + 2. Distributes volume based on mode (combine/pairwise) and group_mode + 3. Generates augmentation edges for combine mode (pseudo nodes) + 4. Returns demands (node names) + augmentations Node names are used (not IDs) so expansion happens BEFORE graph building. IDs are resolved after graph is built with augmentations. + Note: Variable expansion (expand: block) is handled during YAML parsing in + build_demand_set(), so TrafficDemand objects here are already expanded. + Args: network: Network for node selection. traffic_demands: High-level demand specifications. @@ -349,35 +270,33 @@ def expand_demands( all_augmentations: List[AugmentationEdge] = [] for td in traffic_demands: - # Step 1: Variable expansion (if expand_vars present) - for expanded_td in _expand_with_variables(td): - # Step 2: Normalize selectors - src_sel = normalize_selector(expanded_td.source, "demand") - sink_sel = normalize_selector(expanded_td.sink, "demand") + # Step 1: Normalize selectors + src_sel = normalize_selector(td.source, "demand") + tgt_sel = normalize_selector(td.target, "demand") - # Step 3: Select nodes (active_only=True for demands by context default) - src_groups = select_nodes(network, src_sel, default_active_only=True) - dst_groups = select_nodes(network, sink_sel, default_active_only=True) + # Step 2: Select nodes (active_only=True for demands by context default) + src_groups = select_nodes(network, src_sel, default_active_only=True) + dst_groups = select_nodes(network, tgt_sel, default_active_only=True) - if not src_groups or not dst_groups: - continue + if not src_groups or not dst_groups: + continue - policy_preset = expanded_td.flow_policy_config or default_policy_preset + policy_preset = td.flow_policy or default_policy_preset - # Step 4: Expand by group_mode - demands, augmentations = _expand_by_group_mode( - expanded_td, src_groups, dst_groups, policy_preset - ) + # Step 3: Expand by group_mode + demands, augmentations = _expand_by_group_mode( + td, src_groups, dst_groups, policy_preset + ) - all_demands.extend(demands) - all_augmentations.extend(augmentations) + all_demands.extend(demands) + all_augmentations.extend(augmentations) if not all_demands: raise ValueError( "No demands could be expanded. 
Possible causes:\n" - " - Source/sink selectors don't match any nodes\n" + " - Source/target selectors don't match any nodes\n" " - All matching nodes are disabled\n" - " - Source and sink are identical (self-loops not allowed)" + " - Source and target are identical (self-loops not allowed)" ) # Sort by priority (lower = higher priority) diff --git a/ngraph/analysis/failure_manager.py b/ngraph/analysis/failure_manager.py index 5ec6eef..281c0e6 100644 --- a/ngraph/analysis/failure_manager.py +++ b/ngraph/analysis/failure_manager.py @@ -26,7 +26,7 @@ import os import time from concurrent.futures import ThreadPoolExecutor -from typing import TYPE_CHECKING, Any, Dict, Optional, Protocol, Set, TypeVar +from typing import TYPE_CHECKING, Any, Dict, Optional, Protocol, Set from ngraph.dsl.selectors import flatten_link_attrs, flatten_node_attrs from ngraph.logging import get_logger @@ -117,24 +117,15 @@ def _auto_adjust_parallelism(parallelism: int, analysis_func: Any) -> int: return parallelism -T = TypeVar("T") - - class AnalysisFunction(Protocol): """Protocol for analysis functions used with FailureManager. - Analysis functions should take a Network, exclusion sets, and any additional - keyword arguments, returning analysis results of any type. + Analysis functions take a Network, exclusion sets, and analysis-specific + parameters, returning results of any type. """ - def __call__( - self, - network: "Network", - excluded_nodes: Set[str], - excluded_links: Set[str], - **kwargs, - ) -> Any: - """Execute analysis on network with exclusions and optional parameters.""" + def __call__(self, *args: Any, **kwargs: Any) -> Any: + """Execute analysis on network with exclusions and parameters.""" ... @@ -421,7 +412,7 @@ def run_monte_carlo_analysis( ) logger.debug(f"Context built in {time.time() - cache_start:.3f}s") - elif "source" in analysis_kwargs and "sink" in analysis_kwargs: + elif "source" in analysis_kwargs and "target" in analysis_kwargs: # Max-flow analysis or sensitivity analysis from ngraph.analysis.functions import build_maxflow_context @@ -429,7 +420,7 @@ def run_monte_carlo_analysis( analysis_kwargs["context"] = build_maxflow_context( self.network, analysis_kwargs["source"], - analysis_kwargs["sink"], + analysis_kwargs["target"], mode=analysis_kwargs.get("mode", "combine"), ) logger.debug(f"Context built in {time.time() - cache_start:.3f}s") @@ -535,7 +526,7 @@ def run_monte_carlo_analysis( # Map unique task results back to their dedup keys key_to_result: dict[tuple, Any] = {} for (dedup_key, _arg), value in zip( - key_to_first_arg.items(), unique_result_values, strict=False + key_to_first_arg.items(), unique_result_values, strict=True ): key_to_result[dedup_key] = value else: @@ -765,7 +756,7 @@ def run_single_failure_scenario( def run_max_flow_monte_carlo( self, source: str | dict[str, Any], - sink: str | dict[str, Any], + target: str | dict[str, Any], mode: str = "combine", iterations: int = 100, parallelism: int = 1, @@ -775,19 +766,19 @@ def run_max_flow_monte_carlo( seed: int | None = None, store_failure_patterns: bool = False, include_flow_summary: bool = False, - **kwargs, + include_min_cut: bool = False, ) -> Any: """Analyze maximum flow capacity envelopes between node groups under failures. Computes statistical distributions (envelopes) of maximum flow capacity between - source and sink node groups across Monte Carlo failure scenarios. Results include + source and target node groups across Monte Carlo failure scenarios. 
Results include frequency-based capacity envelopes and optional failure pattern analysis. Baseline (no failures) is always run first as a separate reference. Args: source: Source node selector (string path or selector dict). - sink: Sink node selector (string path or selector dict). + target: Target node selector (string path or selector dict). mode: "combine" (aggregate) or "pairwise" (individual flows). iterations: Number of failure scenarios to simulate. parallelism: Number of parallel workers (auto-adjusted if needed). @@ -798,6 +789,7 @@ def run_max_flow_monte_carlo( seed: Optional seed for reproducible results. store_failure_patterns: Whether to store failure trace on results. include_flow_summary: Whether to collect detailed flow summary data. + include_min_cut: Whether to include min-cut edges in results. Returns: Dictionary with keys: @@ -820,13 +812,13 @@ def run_max_flow_monte_carlo( seed=seed, store_failure_patterns=store_failure_patterns, source=source, - sink=sink, + target=target, mode=mode, shortest_path=shortest_path, require_capacity=require_capacity, flow_placement=flow_placement, include_flow_details=include_flow_summary, - **kwargs, + include_min_cut=include_min_cut, ) return raw_results @@ -890,7 +882,7 @@ def _process_sensitivity_results( def run_demand_placement_monte_carlo( self, demands_config: list[dict[str, Any]] - | Any, # List of demand configs or TrafficMatrixSet + | Any, # List of demand configs or DemandSet iterations: int = 100, parallelism: int = 1, placement_rounds: int | str = "auto", @@ -898,7 +890,6 @@ def run_demand_placement_monte_carlo( store_failure_patterns: bool = False, include_flow_details: bool = False, include_used_edges: bool = False, - **kwargs, ) -> Any: """Analyze traffic demand placement success under failures. @@ -908,12 +899,14 @@ def run_demand_placement_monte_carlo( Baseline (no failures) is always run first as a separate reference. Args: - demands_config: List of demand configs or TrafficMatrixSet object. + demands_config: List of demand configs or DemandSet object. iterations: Number of failure scenarios to simulate. parallelism: Number of parallel workers (auto-adjusted if needed). placement_rounds: Optimization rounds for demand placement. seed: Optional seed for reproducible results. store_failure_patterns: Whether to store failure trace on results. + include_flow_details: Whether to include cost distribution details. + include_used_edges: Whether to include used edges in results. 
Returns: Dictionary with keys: @@ -926,10 +919,10 @@ def run_demand_placement_monte_carlo( # If caller passed a sequence of TrafficDemand objects, convert to dicts if not isinstance(demands_config, list): - # Accept TrafficMatrixSet or any container providing get_matrix()/matrices + # Accept DemandSet or any container providing get_all_demands() serializable_demands: list[dict[str, Any]] = [] if hasattr(demands_config, "get_all_demands"): - td_iter = demands_config.get_all_demands() # TrafficMatrixSet helper + td_iter = demands_config.get_all_demands() # DemandSet helper elif hasattr(demands_config, "demands"): # Accept a mock object exposing 'demands' for tests td_iter = demands_config.demands @@ -940,17 +933,11 @@ def run_demand_placement_monte_carlo( { "id": getattr(demand, "id", None), "source": getattr(demand, "source", ""), - "sink": getattr(demand, "sink", ""), - "demand": float(getattr(demand, "demand", 0.0)), + "target": getattr(demand, "target", ""), + "volume": float(getattr(demand, "volume", 0.0)), "mode": getattr(demand, "mode", "pairwise"), "group_mode": getattr(demand, "group_mode", "flatten"), - "expand_vars": getattr(demand, "expand_vars", {}), - "expansion_mode": getattr( - demand, "expansion_mode", "cartesian" - ), - "flow_policy_config": getattr( - demand, "flow_policy_config", None - ), + "flow_policy": getattr(demand, "flow_policy", None), "priority": int(getattr(demand, "priority", 0)), } ) @@ -966,14 +953,13 @@ def run_demand_placement_monte_carlo( placement_rounds=placement_rounds, include_flow_details=include_flow_details, include_used_edges=include_used_edges, - **kwargs, ) return raw_results def run_sensitivity_monte_carlo( self, source: str | dict[str, Any], - sink: str | dict[str, Any], + target: str | dict[str, Any], mode: str = "combine", iterations: int = 100, parallelism: int = 1, @@ -981,7 +967,6 @@ def run_sensitivity_monte_carlo( flow_placement: FlowPlacement | str = FlowPlacement.PROPORTIONAL, seed: int | None = None, store_failure_patterns: bool = False, - **kwargs, ) -> dict[str, Any]: """Analyze component criticality for flow capacity under failures. @@ -993,7 +978,7 @@ def run_sensitivity_monte_carlo( Args: source: Source node selector (string path or selector dict). - sink: Sink node selector (string path or selector dict). + target: Target node selector (string path or selector dict). mode: "combine" (aggregate) or "pairwise" (individual flows). iterations: Number of failure scenarios to simulate. parallelism: Number of parallel workers (auto-adjusted if needed). @@ -1023,11 +1008,10 @@ def run_sensitivity_monte_carlo( seed=seed, store_failure_patterns=store_failure_patterns, source=source, - sink=sink, + target=target, mode=mode, shortest_path=shortest_path, flow_placement=flow_placement, - **kwargs, ) # Aggregate component scores across iterations for statistical analysis @@ -1037,7 +1021,7 @@ def run_sensitivity_monte_carlo( # Augment metadata with analysis-specific context raw_results["metadata"]["source"] = source - raw_results["metadata"]["sink"] = sink + raw_results["metadata"]["target"] = target raw_results["metadata"]["mode"] = mode return raw_results diff --git a/ngraph/analysis/functions.py b/ngraph/analysis/functions.py index 585dc8b..251e0ae 100644 --- a/ngraph/analysis/functions.py +++ b/ngraph/analysis/functions.py @@ -1,11 +1,11 @@ """Flow analysis functions for network evaluation. 
-These functions are designed for use with FailureManager and follow the -AnalysisFunction protocol: analysis_func(network: Network, excluded_nodes: Set[str], -excluded_links: Set[str], **kwargs) -> Any. +These functions are designed for use with FailureManager. Each analysis function +takes a Network, exclusion sets, and analysis-specific parameters, returning +results of type FlowIterationResult. -All functions accept only simple, hashable parameters to ensure compatibility -with FailureManager's caching and multiprocessing systems. +Parameters should ideally be hashable for efficient caching in FailureManager; +non-hashable objects are identified by memory address for cache key generation. Graph caching enables efficient repeated analysis with different exclusion sets by building the graph once and using O(|excluded|) masks for exclusions. @@ -36,26 +36,27 @@ def _reconstruct_traffic_demands( """Reconstruct TrafficDemand objects from serialized config. Args: - demands_config: List of demand configurations. + demands_config: List of demand configurations with fields: + source, target, volume, mode, group_mode, flow_policy, priority. Returns: List of TrafficDemand objects with preserved IDs. """ - return [ - TrafficDemand( - id=config.get("id") or "", - source=config["source"], - sink=config["sink"], - demand=config["demand"], - mode=config.get("mode", "pairwise"), - group_mode=config.get("group_mode", "flatten"), - expand_vars=config.get("expand_vars", {}), - expansion_mode=config.get("expansion_mode", "cartesian"), - flow_policy_config=config.get("flow_policy_config"), - priority=config.get("priority", 0), + results = [] + for config in demands_config: + results.append( + TrafficDemand( + id=config.get("id") or "", + source=config["source"], + target=config.get("target", ""), + volume=config.get("volume", 0.0), + mode=config.get("mode", "pairwise"), + group_mode=config.get("group_mode", "flatten"), + flow_policy=config.get("flow_policy"), + priority=config.get("priority", 0), + ) ) - for config in demands_config - ] + return results if TYPE_CHECKING: @@ -67,7 +68,7 @@ def max_flow_analysis( excluded_nodes: Set[str], excluded_links: Set[str], source: str | dict[str, Any], - sink: str | dict[str, Any], + target: str | dict[str, Any], mode: str = "combine", shortest_path: bool = False, require_capacity: bool = True, @@ -75,7 +76,6 @@ def max_flow_analysis( include_flow_details: bool = False, include_min_cut: bool = False, context: Optional[AnalysisContext] = None, - **kwargs, ) -> FlowIterationResult: """Analyze maximum flow capacity between node groups. @@ -84,7 +84,7 @@ def max_flow_analysis( excluded_nodes: Set of node names to exclude temporarily. excluded_links: Set of link IDs to exclude temporarily. source: Source node selector (string path or selector dict). - sink: Sink node selector (string path or selector dict). + target: Target node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). shortest_path: Whether to use shortest paths only. require_capacity: If True (default), path selection considers available @@ -93,7 +93,6 @@ def max_flow_analysis( include_flow_details: Whether to collect cost distribution and similar details. include_min_cut: Whether to include min-cut edge list in entry data. context: Pre-built AnalysisContext for efficient repeated analysis. - **kwargs: Ignored. Accepted for interface compatibility. Returns: FlowIterationResult describing this iteration. 
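As a usage note on the relaxed protocol described above: any callable that accepts the network, the two exclusion sets, and analysis-specific keyword parameters now satisfies `AnalysisFunction`. A minimal sketch of a custom metric under that convention (the dict-of-nodes shape mirrors how `network.nodes` is used elsewhere in this diff; the function itself is hypothetical):

```python
# Hypothetical custom analysis function matching the relaxed protocol:
# (network, excluded_nodes, excluded_links, **params) -> result.
from typing import Any, Set

def count_active_nodes(
    network: Any,
    excluded_nodes: Set[str],
    excluded_links: Set[str],
    **params: Any,
) -> int:
    """Toy metric: enabled nodes that are not excluded in this iteration."""
    return sum(
        1
        for node in network.nodes.values()
        if not node.disabled and node.name not in excluded_nodes
    )
```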
@@ -105,7 +104,7 @@ def max_flow_analysis( if context is not None: ctx = context else: - ctx = analyze(network, source=source, sink=sink, mode=mode_enum) + ctx = analyze(network, source=source, sink=target, mode=mode_enum) flow_entries: list[FlowEntry] = [] total_demand = 0.0 @@ -191,7 +190,6 @@ def demand_placement_analysis( include_flow_details: bool = False, include_used_edges: bool = False, context: Optional[AnalysisContext] = None, - **kwargs, ) -> FlowIterationResult: """Analyze traffic demand placement success rates using Core directly. @@ -199,7 +197,7 @@ def demand_placement_analysis( 1. Builds Core infrastructure (graph, algorithms, flow_graph) or uses cached 2. Expands demands into concrete (src, dst, volume) tuples 3. Places each demand using SPF caching for cacheable policies - 4. Falls back to FlowPolicy for complex multi-flow policies + 4. Uses FlowPolicy for complex multi-flow policies 5. Aggregates results into FlowIterationResult SPF Caching Optimization: @@ -217,7 +215,6 @@ def demand_placement_analysis( include_flow_details: When True, include cost_distribution per flow. include_used_edges: When True, include set of used edges per demand in entry data. context: Pre-built AnalysisContext for fast repeated analysis. - **kwargs: Ignored. Accepted for interface compatibility. Returns: FlowIterationResult describing this iteration. @@ -293,18 +290,17 @@ def sensitivity_analysis( excluded_nodes: Set[str], excluded_links: Set[str], source: str | dict[str, Any], - sink: str | dict[str, Any], + target: str | dict[str, Any], mode: str = "combine", shortest_path: bool = False, flow_placement: FlowPlacement = FlowPlacement.PROPORTIONAL, context: Optional[AnalysisContext] = None, - **kwargs, ) -> FlowIterationResult: """Analyze component sensitivity to failures. Identifies critical edges (saturated edges) and computes the flow reduction caused by removing each one. Returns a FlowIterationResult where each - FlowEntry represents a source/sink pair with: + FlowEntry represents a source/target pair with: - demand/placed = max flow value (the capacity being analyzed) - dropped = 0.0 (baseline analysis, no failures applied) - data["sensitivity"] = {link_id:direction: flow_reduction} for critical edges @@ -314,14 +310,13 @@ def sensitivity_analysis( excluded_nodes: Set of node names to exclude temporarily. excluded_links: Set of link IDs to exclude temporarily. source: Source node selector (string path or selector dict). - sink: Sink node selector (string path or selector dict). + target: Target node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). shortest_path: If True, use single-tier shortest-path flow (IP/IGP mode). Reports only edges used under ECMP routing. If False (default), use full iterative max-flow (SDN/TE mode) and report all saturated edges. flow_placement: Flow placement strategy. context: Pre-built AnalysisContext for efficient repeated analysis. - **kwargs: Ignored. Accepted for interface compatibility. Returns: FlowIterationResult with sensitivity data in each FlowEntry.data. @@ -333,7 +328,7 @@ def sensitivity_analysis( if context is not None: ctx = context else: - ctx = analyze(network, source=source, sink=sink, mode=mode_enum) + ctx = analyze(network, source=source, sink=target, mode=mode_enum) # Get max flow values for each pair flow_values = ctx.max_flow( @@ -387,7 +382,7 @@ def build_demand_context( ) -> AnalysisContext: """Build an AnalysisContext for repeated demand placement analysis. 
- Pre-computes the graph with augmentations (pseudo source/sink nodes) for + Pre-computes the graph with augmentations (pseudo source/target nodes) for efficient repeated analysis with different exclusion sets. Args: @@ -413,22 +408,22 @@ def build_demand_context( def build_maxflow_context( network: "Network", source: str | dict[str, Any], - sink: str | dict[str, Any], + target: str | dict[str, Any], mode: str = "combine", ) -> AnalysisContext: """Build an AnalysisContext for repeated max-flow analysis. - Pre-computes the graph with pseudo source/sink nodes for all source/sink + Pre-computes the graph with pseudo source/target nodes for all source/target pairs, enabling O(|excluded|) mask building per iteration. Args: network: Network instance. source: Source node selector (string path or selector dict). - sink: Sink node selector (string path or selector dict). + target: Target node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). Returns: AnalysisContext ready for use with max_flow_analysis or sensitivity_analysis. """ mode_enum = Mode.COMBINE if mode == "combine" else Mode.PAIRWISE - return analyze(network, source=source, sink=sink, mode=mode_enum) + return analyze(network, source=source, sink=target, mode=mode_enum) diff --git a/ngraph/analysis/placement.py b/ngraph/analysis/placement.py index 21870c8..6dafac0 100644 --- a/ngraph/analysis/placement.py +++ b/ngraph/analysis/placement.py @@ -254,6 +254,9 @@ def _place_cached( residual = np.ascontiguousarray( flow_graph.residual_view(), dtype=np.float64 ) + # Note: Do NOT cache residual-based DAGs. The TE loop computes + # DAGs specific to this demand's placement; caching them would + # corrupt results for other demands from the same source. fresh_dists, fresh_dag = ctx.algorithms.spf( ctx.handle, src=src_id, @@ -265,7 +268,6 @@ def _place_cached( multipath=True, dtype="float64", ) - dag_cache[cache_key] = (fresh_dists, fresh_dag) if fresh_dists[dst_id] == float("inf"): break diff --git a/ngraph/cli.py b/ngraph/cli.py index 0c04fe0..90561f2 100644 --- a/ngraph/cli.py +++ b/ngraph/cli.py @@ -136,7 +136,7 @@ def _collect_step_path_fields(step: Any) -> list[tuple[str, str]]: """Return (field, pattern) pairs for fields that represent node selectors. Fields considered: - - `source` and `sink` selector fields with string values + - `source` and `target` selector fields with string values - names ending with "_path" or "_regex" with non-empty string values """ fields: list[tuple[str, str]] = [] @@ -148,7 +148,11 @@ def _collect_step_path_fields(step: Any) -> list[tuple[str, str]]: if not value.strip(): continue # Selector fields or pattern fields - if key in ("source", "sink") or key.endswith("_path") or key.endswith("_regex"): + if ( + key in ("source", "target") + or key.endswith("_path") + or key.endswith("_regex") + ): fields.append((key, value)) return fields @@ -546,16 +550,14 @@ def _print_failure_policies(failure_policy_set: Any, detail: bool) -> None: for ri, rule in enumerate(mode.rules[:3]): extra = ( f" count={getattr(rule, 'count', '')}" - if rule.rule_type == "choice" + if rule.mode == "choice" else ( f" p={getattr(rule, 'probability', '')}" - if rule.rule_type == "random" + if rule.mode == "random" else "" ) ) - print( - f" - {ri + 1}. {rule.entity_scope} {rule.rule_type}{extra}" - ) + print(f" - {ri + 1}. {rule.scope} {rule.mode}{extra}") if rule_count > 3: print(f" ... 
and {rule_count - 3} more rules") if policy_count > 5: @@ -563,37 +565,37 @@ def _print_failure_policies(failure_policy_set: Any, detail: bool) -> None: print(f" ... and {remaining} more") -def _print_traffic_matrices( - network: Any, tms: Any, detail: bool, total_enabled_link_capacity: float +def _print_demand_sets( + network: Any, ds: Any, detail: bool, total_enabled_link_capacity: float ) -> None: - """Print traffic matrices summary and capacity-vs-demand ratio if available. + """Print demand sets summary and capacity-vs-demand ratio if available. Args: network: Network instance for node pattern summarization. - tms: TrafficMatrixSet with defined matrices. + ds: DemandSet with defined sets. detail: Whether to print detailed tables. total_enabled_link_capacity: Sum of capacities of enabled links. """ - print("\n6. TRAFFIC MATRICES") + print("\n6. DEMAND SETS") print("-" * 30) - matrix_count = len(tms.matrices) - print(f" Total: {matrix_count}") - if not tms.matrices: + set_count = len(ds.sets) + print(f" Total: {set_count}") + if not ds.sets: return - # Capacity vs Demand summary across all matrices (shown first for visibility) + # Capacity vs Demand summary across all sets (shown first for visibility) try: grand_total_demand = 0.0 grand_demand_count = 0 - for demands in tms.matrices.values(): + for demands in ds.sets.values(): grand_demand_count += len(demands) for d in demands: - grand_total_demand += float(getattr(d, "demand", 0.0)) + grand_total_demand += float(getattr(d, "volume", 0.0)) print("\n Capacity vs Demand:") print(f" enabled link capacity: {total_enabled_link_capacity:,.1f}") print( - f" total demand (all matrices): {grand_total_demand:,.1f} ({grand_demand_count:,} demands)" + f" total demand (all sets): {grand_total_demand:,.1f} ({grand_demand_count:,} demands)" ) if total_enabled_link_capacity > 0.0 and grand_total_demand > 0.0: cap_per_demand = total_enabled_link_capacity / grand_total_demand @@ -605,57 +607,57 @@ def _print_traffic_matrices( except Exception as exc: # pragma: no cover (defensive) print(f" Capacity vs Demand: unable to compute ({type(exc).__name__}: {exc})") - matrix_items = list(tms.matrices.items())[:5] - for matrix_name, demands in matrix_items: + set_items = list(ds.sets.items())[:5] + for set_name, demands in set_items: demand_count = len(demands) - total_volume = sum(getattr(d, "demand", 0.0) for d in demands) + total_volume = sum(getattr(d, "volume", 0.0) for d in demands) print( - f" {matrix_name}: {demand_count} demand{'s' if demand_count != 1 else ''}" + f" {set_name}: {demand_count} demand{'s' if demand_count != 1 else ''}" ) src_counts: Dict[str, int] = {} - snk_counts: Dict[str, int] = {} + tgt_counts: Dict[str, int] = {} pair_counts: Dict[tuple[str, str], Dict[str, float | int]] = {} for d in demands: # Handle both string and dict selectors src_key = d.source if isinstance(d.source, str) else str(d.source) - snk_key = d.sink if isinstance(d.sink, str) else str(d.sink) + tgt_key = d.target if isinstance(d.target, str) else str(d.target) src_counts[src_key] = src_counts.get(src_key, 0) + 1 - snk_counts[snk_key] = snk_counts.get(snk_key, 0) + 1 - key = (src_key, snk_key) + tgt_counts[tgt_key] = tgt_counts.get(tgt_key, 0) + 1 + key = (src_key, tgt_key) stats = pair_counts.setdefault(key, {"count": 0, "volume": 0.0}) stats["count"] = int(stats["count"]) + 1 - stats["volume"] = float(stats["volume"]) + float(getattr(d, "demand", 0.0)) + stats["volume"] = float(stats["volume"]) + float(getattr(d, "volume", 0.0)) if detail: print(f" total demand: 
{total_volume:,.0f}") print( - f" unique source patterns: {len(src_counts)}; unique sink patterns: {len(snk_counts)}" + f" unique source patterns: {len(src_counts)}; unique target patterns: {len(tgt_counts)}" ) rows: list[list[str]] = [] - for (src_pat, snk_pat), stats in list(pair_counts.items())[:10]: + for (src_pat, tgt_pat), stats in list(pair_counts.items())[:10]: src_info = _summarize_pattern(src_pat, network) - snk_info = _summarize_pattern(snk_pat, network) + tgt_info = _summarize_pattern(tgt_pat, network) src_match = ( f"{src_info['groups']}g/{src_info['nodes']}n ({src_info['enabled_nodes']}e)" if "error" not in src_info else f"ERROR {src_info['error']}" ) - snk_match = ( - f"{snk_info['groups']}g/{snk_info['nodes']}n ({snk_info['enabled_nodes']}e)" - if "error" not in snk_info - else f"ERROR {snk_info['error']}" + tgt_match = ( + f"{tgt_info['groups']}g/{tgt_info['nodes']}n ({tgt_info['enabled_nodes']}e)" + if "error" not in tgt_info + else f"ERROR {tgt_info['error']}" ) label_preview = ", ".join(src_info.get("labels", [])) or "-" rows.append( [ src_pat, - snk_pat, + tgt_pat, str(int(stats["count"])), f"{float(stats['volume']):,.0f}", src_match, - snk_match, + tgt_match, label_preview, ] ) @@ -664,11 +666,11 @@ def _print_traffic_matrices( table = _format_table( [ "Source Pattern", - "Sink Pattern", + "Target Pattern", "Demands", "Total", "Src Match", - "Snk Match", + "Tgt Match", "Src Labels", ], rows, @@ -688,17 +690,17 @@ def _print_traffic_matrices( top_rows: list[list[str]] = [] for d in sorted_demands: src = d.source if isinstance(d.source, str) else str(d.source) - snk = d.sink if isinstance(d.sink, str) else str(d.sink) + tgt = d.target if isinstance(d.target, str) else str(d.target) top_rows.append( [ src, - snk, - f"{float(getattr(d, 'demand', 0.0)):,.1f}", + tgt, + f"{float(getattr(d, 'volume', 0.0)):,.1f}", str(getattr(d, "priority", 0)), ] ) top_table = _format_table( - ["Source", "Sink", "Offered", "Priority"], + ["Source", "Target", "Offered", "Priority"], top_rows, ) print( @@ -714,19 +716,19 @@ def _print_traffic_matrices( if isinstance(demand.source, str) else str(demand.source) ) - snk = ( - demand.sink - if isinstance(demand.sink, str) - else str(demand.sink) + tgt = ( + demand.target + if isinstance(demand.target, str) + else str(demand.target) ) - print(f" {i + 1}. {src} -> {snk} ({demand.demand})") + print(f" {i + 1}. {src} -> {tgt} ({demand.volume})") if demand_count > 3: print(f" ... 
and {demand_count - 3} more demands") else: print(f" total demand: {total_volume:,.0f}") print(" Node selection preview:") top_src = sorted(src_counts.items(), key=lambda kv: -kv[1])[:2] - top_snk = sorted(snk_counts.items(), key=lambda kv: -kv[1])[:2] + top_tgt = sorted(tgt_counts.items(), key=lambda kv: -kv[1])[:2] for name, _ in top_src: info = _summarize_pattern(name, network) if "error" in info: @@ -735,17 +737,17 @@ def _print_traffic_matrices( print( f" source {name}: {info['groups']} groups, {info['nodes']} nodes ({info['enabled_nodes']} enabled)" ) - for name, _ in top_snk: + for name, _ in top_tgt: info = _summarize_pattern(name, network) if "error" in info: - print(f" sink {name}: ERROR {info['error']}") + print(f" target {name}: ERROR {info['error']}") else: print( - f" sink {name}: {info['groups']} groups, {info['nodes']} nodes ({info['enabled_nodes']} enabled)" + f" target {name}: {info['groups']} groups, {info['nodes']} nodes ({info['enabled_nodes']} enabled)" ) - if matrix_count > 5: - remaining = matrix_count - 5 + if set_count > 5: + remaining = set_count - 5 print(f" ... and {remaining} more") @@ -879,7 +881,7 @@ def _inspect_scenario(path: Path, detail: bool = False) -> None: scenario = Scenario.from_yaml(yaml_text) logger.debug( - "Scenario loaded: nodes=%d, links=%d, steps=%d, policies=%d, matrices=%d", + "Scenario loaded: nodes=%d, links=%d, steps=%d, policies=%d, demand_sets=%d", len(getattr(scenario.network, "nodes", {})), len(getattr(scenario.network, "links", {})), len( @@ -888,7 +890,7 @@ def _inspect_scenario(path: Path, detail: bool = False) -> None: or [] ), len(getattr(scenario.failure_policy_set, "policies", {})), - len(getattr(scenario.traffic_matrix_set, "matrices", {})), + len(getattr(scenario.demand_set, "sets", {})), ) logger.info("✓ Scenario validated and loaded successfully") @@ -910,15 +912,15 @@ def _inspect_scenario(path: Path, detail: bool = False) -> None: sum(float(lk.capacity) for lk in enabled_links) ) - # Traffic matrices - tms = scenario.traffic_matrix_set - matrix_count = len(tms.matrices) + # Demand sets + ds = scenario.demand_set + set_count = len(ds.sets) total_demands = 0 total_demand_volume = 0.0 - for demands in tms.matrices.values(): + for demands in ds.sets.values(): total_demands += len(demands) for d in demands: - total_demand_volume += float(getattr(d, "demand", 0.0)) + total_demand_volume += float(getattr(d, "volume", 0.0)) util = ( (total_demand_volume / total_enabled_capacity) @@ -950,8 +952,8 @@ def _inspect_scenario(path: Path, detail: bool = False) -> None: ], ["Capacity (enabled)", f"{total_enabled_capacity:,.1f}"], [ - "Demand (all matrices)", - f"{total_demand_volume:,.1f} ({total_demands:,} demands across {matrix_count} matrices)", + "Demand (all sets)", + f"{total_demand_volume:,.1f} ({total_demands:,} demands across {set_count} sets)", ], ["Utilization", f"{util:,.2%}"], ["Risk groups", f"{rg_total} total; {rg_disabled} disabled"], @@ -993,9 +995,9 @@ def _inspect_scenario(path: Path, detail: bool = False) -> None: # Failure Policies Analysis _print_failure_policies(scenario.failure_policy_set, detail) - # Traffic Matrices Analysis - _print_traffic_matrices( - network, scenario.traffic_matrix_set, detail, total_enabled_link_capacity + # Demand Sets Analysis + _print_demand_sets( + network, scenario.demand_set, detail, total_enabled_link_capacity ) # Workflow Analysis diff --git a/ngraph/dsl/blueprints/expand.py b/ngraph/dsl/blueprints/expand.py index 3ec1701..7f5ae03 100644 --- a/ngraph/dsl/blueprints/expand.py +++ 
b/ngraph/dsl/blueprints/expand.py @@ -4,11 +4,22 @@ import copy from dataclasses import dataclass, field -from typing import Any, Dict, List, Set +from typing import Any, Dict, List, Optional, Set from ngraph.dsl.blueprints import parser as _bp_parse -from ngraph.dsl.expansion import ExpansionSpec, expand_risk_group_refs, expand_templates -from ngraph.dsl.selectors import normalize_selector, select_nodes +from ngraph.dsl.expansion import ( + ExpansionSpec, + expand_block, + expand_risk_group_refs, + expand_templates, +) +from ngraph.dsl.selectors import ( + evaluate_conditions, + flatten_link_attrs, + normalize_selector, + parse_match_spec, + select_nodes, +) from ngraph.model.network import Link, Network, Node @@ -16,23 +27,18 @@ class Blueprint: """Represents a reusable blueprint for hierarchical sub-topologies. - A blueprint may contain multiple groups of nodes (each can have a node_count - and a name_template), plus adjacency rules describing how those groups connect. + A blueprint may contain multiple node definitions (each can have count + and template), plus link definitions describing how those nodes connect. Attributes: - name (str): Unique identifier of this blueprint. - groups (Dict[str, Any]): A mapping of group_name -> group definition. - Allowed top-level keys in each group definition here are the same - as in normal group definitions (e.g. node_count, name_template, - attrs, disabled, risk_groups, or nested use_blueprint references, etc.). - adjacency (List[Dict[str, Any]]): A list of adjacency definitions - describing how these groups are linked, using the DSL fields - (source, target, pattern, link_params, etc.). + name: Unique identifier of this blueprint. + nodes: A mapping of node_name -> node definition. + links: A list of link definitions. """ name: str - groups: Dict[str, Any] - adjacency: List[Dict[str, Any]] + nodes: Dict[str, Any] + links: List[Dict[str, Any]] @dataclass @@ -41,15 +47,14 @@ class DSLExpansionContext: to be populated during DSL expansion. Attributes: - blueprints (Dict[str, Blueprint]): Dictionary of blueprint-name -> Blueprint. - network (Network): The Network into which expanded nodes/links are inserted. - pending_bp_adj (List[tuple[Dict[str, Any], str]]): Deferred blueprint adjacency - expansions collected as (adj_def, parent_path) to be processed later. + blueprints: Dictionary of blueprint-name -> Blueprint. + network: The Network into which expanded nodes/links are inserted. + pending_bp_links: Deferred blueprint link expansions. """ blueprints: Dict[str, Blueprint] network: Network - pending_bp_adj: List[tuple[Dict[str, Any], str]] = field(default_factory=list) + pending_bp_links: List[tuple[Dict[str, Any], str]] = field(default_factory=list) def expand_network_dsl(data: Dict[str, Any]) -> Network: @@ -58,33 +63,27 @@ def expand_network_dsl(data: Dict[str, Any]) -> Network: Overall flow: 1) Parse "blueprints" into Blueprint objects. 2) Build a Network from "network" metadata (e.g. name, version). - 3) Expand 'network["groups"]' (collect blueprint adjacencies for later). - - If a group references a blueprint, incorporate that blueprint's subgroups - while merging parent's attrs + disabled + risk_groups into subgroups. - Blueprint adjacency is deferred and processed after node overrides. + 3) Expand 'network["nodes"]' (collect blueprint links for later). + - If a node group references a blueprint, incorporate that blueprint's + nodes while merging parent's attrs + disabled + risk_groups. 
+ Blueprint links are deferred and processed after node rules. - Otherwise, directly create nodes (a "direct node group"). - 4) Process any direct node definitions (network["nodes"]). - 5) Process node overrides (in order if multiple overrides match). - 6) Expand deferred blueprint adjacencies. - 7) Expand adjacency definitions in 'network["adjacency"]'. - 8) Process any direct link definitions (network["links"]). - 9) Process link overrides (in order if multiple overrides match). + 4) Process node rules (in order if multiple rules match). + 5) Expand deferred blueprint links. + 6) Expand link definitions in 'network["links"]'. + 7) Process link rules (in order if multiple rules match). Field validation rules: - - Only certain top-level fields are permitted in each structure. Any extra - keys raise a ValueError. "attrs" is where arbitrary user fields go. - - For link_params, recognized fields are "capacity", "cost", "disabled", - "risk_groups", "attrs". Everything else must go inside link_params["attrs"]. - - For node/group definitions, recognized fields include "node_count", - "name_template", "attrs", "disabled", "risk_groups" or "use_blueprint" - for blueprint-based groups. + - Only certain top-level fields are permitted in each structure. + - Link properties are flat (capacity, cost, etc. at link level). + - For node definitions: count, template, attrs, disabled, risk_groups, + or blueprint for blueprint-based nodes. Args: - data (Dict[str, Any]): The YAML-parsed dictionary containing - optional "blueprints" + "network". + data: The YAML-parsed dictionary containing optional "blueprints" + "network". Returns: - Network: The expanded Network object with all nodes and links. + The expanded Network object with all nodes and links. """ # 1) Parse blueprint definitions blueprint_map: Dict[str, Blueprint] = {} @@ -98,13 +97,13 @@ def expand_network_dsl(data: Dict[str, Any]) -> Network: ) _bp_parse.check_no_extra_keys( bp_data, - allowed={"groups", "adjacency"}, + allowed={"nodes", "links"}, context=f"blueprint '{bp_name}'", ) blueprint_map[bp_name] = Blueprint( name=bp_name, - groups=bp_data.get("groups", {}), - adjacency=bp_data.get("adjacency", []), + nodes=bp_data.get("nodes", {}), + links=bp_data.get("links", []), ) # 2) Initialize the Network from "network" metadata @@ -118,12 +117,10 @@ def expand_network_dsl(data: Dict[str, Any]) -> Network: if key not in ( "name", "version", - "groups", "nodes", - "adjacency", "links", - "link_overrides", - "node_overrides", + "link_rules", + "node_rules", ): raise ValueError(f"Unrecognized top-level key in 'network': {key}") @@ -135,69 +132,69 @@ def expand_network_dsl(data: Dict[str, Any]) -> Network: # Create a context ctx = DSLExpansionContext(blueprints=blueprint_map, network=net) - # 3) Expand top-level groups - for group_name, group_def in network_data.get("groups", {}).items(): - if not isinstance(group_def, dict): - raise ValueError(f"Group definition for '{group_name}' must be a dict.") - _expand_group(ctx, parent_path="", group_name=group_name, group_def=group_def) - - # 4) Process direct node definitions - _process_direct_nodes(ctx.network, network_data) - - # 5) Process node overrides early so they influence adjacency selection - _process_node_overrides(ctx.network, network_data) + # 3) Expand top-level node definitions + for node_name, node_def in network_data.get("nodes", {}).items(): + if not isinstance(node_def, dict): + raise ValueError(f"Node definition for '{node_name}' must be a dict.") + _expand_node_group( + ctx, 
parent_path="", group_name=node_name, group_def=node_def + ) - # 6) Expand deferred blueprint adjacencies - for _adj_def, _parent in ctx.pending_bp_adj: - _expand_blueprint_adjacency(ctx, _adj_def, _parent) + # 4) Process node rules early so they influence link selection + _process_node_rules(ctx.network, network_data) - # 7) Expand top-level adjacency definitions - for adj_def in network_data.get("adjacency", []): - if not isinstance(adj_def, dict): - raise ValueError("Each adjacency entry must be a dictionary.") - _expand_adjacency(ctx, adj_def) + # 5) Expand deferred blueprint links + for _link_def, _parent in ctx.pending_bp_links: + _expand_blueprint_link(ctx, _link_def, _parent) - # 8) Process direct link definitions - _process_direct_links(ctx.network, network_data) + # 6) Expand top-level link definitions + for link_def in network_data.get("links", []): + if not isinstance(link_def, dict): + raise ValueError("Each link entry must be a dictionary.") + _expand_link(ctx, link_def) - # 9) Process link overrides (in order) - _process_link_overrides(ctx.network, network_data) + # 7) Process link rules (in order) + _process_link_rules(ctx.network, network_data) return net -def _expand_group( +def _expand_node_group( ctx: DSLExpansionContext, parent_path: str, group_name: str, group_def: Dict[str, Any], - inherited_risk_groups: Set[str] | None = None, + inherited_risk_groups: Optional[Set[str]] = None, ) -> None: - """Expands a single group definition into either: - - Another blueprint's subgroups, or - - A direct node group (with node_count, etc.), + """Expands a single node definition into either: + - Another blueprint's nodes, or + - Nested nodes (inline hierarchy), or + - A direct node group (with count, etc.), - Possibly replicating itself if group_name has bracket expansions. - If 'use_blueprint' is present, we expand that blueprint. Otherwise, we - create nodes directly. + If 'blueprint' is present, we expand that blueprint. If 'nodes' is present, + we recurse for nested groups. Otherwise, we create nodes directly. For blueprint usage: - Allowed keys: {"use_blueprint", "parameters", "attrs", "disabled", "risk_groups"}. - We merge 'attrs', 'disabled', and 'risk_groups' from this parent group - into each blueprint subgroup's definition. + Allowed keys: {"blueprint", "params", "attrs", "disabled", "risk_groups"}. + We merge 'attrs', 'disabled', and 'risk_groups' from this parent + into each blueprint node definition. - For direct node groups (no 'use_blueprint'): - Allowed keys: {"node_count", "name_template", "attrs", "disabled", "risk_groups"}. + For nested nodes: + Allowed keys: {"nodes", "attrs", "disabled", "risk_groups"}. + + For direct node groups (no 'blueprint', no 'nodes'): + Allowed keys: {"count", "template", "attrs", "disabled", "risk_groups"}. If group_name includes bracket expansions like "fa[1-2]", it replicates the same group_def for each expanded name. Args: - ctx (DSLExpansionContext): The context containing blueprint info and the Network. - parent_path (str): The parent path in the hierarchy. - group_name (str): The current group's name (may have bracket expansions). - group_def (Dict[str, Any]): The group definition (node_count, name_template, etc.). - inherited_risk_groups (Set[str]): Risk groups inherited from a higher-level group. + ctx: The context containing blueprint info and the Network. + parent_path: The parent path in the hierarchy. + group_name: The current group's name (may have bracket expansions). 
+ group_def: The node definition (count, template, etc.). + inherited_risk_groups: Risk groups inherited from a higher-level group. """ if inherited_risk_groups is None: inherited_risk_groups = set() @@ -205,7 +202,7 @@ def _expand_group( # If bracket expansions exist, replicate for each expansion if len(expanded_names) > 1 or expanded_names[0] != group_name: for expanded_name in expanded_names: - _expand_group( + _expand_node_group( ctx, parent_path, expanded_name, group_def, inherited_risk_groups ) return @@ -216,23 +213,23 @@ def _expand_group( else: effective_path = group_name - if "use_blueprint" in group_def: + if "blueprint" in group_def: # Blueprint usage => recognized keys _bp_parse.check_no_extra_keys( group_def, - allowed={"use_blueprint", "parameters", "attrs", "disabled", "risk_groups"}, - context=f"group '{group_name}' using blueprint", + allowed={"blueprint", "params", "attrs", "disabled", "risk_groups"}, + context=f"node '{group_name}' using blueprint", ) - blueprint_name: str = group_def["use_blueprint"] + blueprint_name: str = group_def["blueprint"] bp = ctx.blueprints.get(blueprint_name) if not bp: raise ValueError( - f"Group '{group_name}' references unknown blueprint '{blueprint_name}'." + f"Node '{group_name}' references unknown blueprint '{blueprint_name}'." ) parent_attrs = copy.deepcopy(group_def.get("attrs", {})) if not isinstance(parent_attrs, dict): - raise ValueError(f"'attrs' must be a dict in group '{group_name}'.") + raise ValueError(f"'attrs' must be a dict in node '{group_name}'.") parent_disabled = bool(group_def.get("disabled", False)) # Merge parent's risk_groups @@ -241,17 +238,17 @@ def _expand_group( rg_val = group_def["risk_groups"] if not isinstance(rg_val, (list, set)): raise ValueError( - f"'risk_groups' must be list or set in group '{group_name}'." + f"'risk_groups' must be list or set in node '{group_name}'." ) parent_risk_groups |= expand_risk_group_refs(rg_val) - param_overrides: Dict[str, Any] = group_def.get("parameters", {}) + param_overrides: Dict[str, Any] = group_def.get("params", {}) if not isinstance(param_overrides, dict): - raise ValueError(f"'parameters' must be a dict in group '{group_name}'.") + raise ValueError(f"'params' must be a dict in node '{group_name}'.") - # For each subgroup in the blueprint, apply param overrides and + # For each node in the blueprint, apply param overrides and # merge parent's attrs/disabled/risk_groups - for bp_sub_name, bp_sub_def in bp.groups.items(): + for bp_sub_name, bp_sub_def in bp.nodes.items(): merged_def = _apply_parameters(bp_sub_name, bp_sub_def, param_overrides) merged_def = dict(merged_def) # ensure we can mutate @@ -263,7 +260,7 @@ def _expand_group( child_attrs = merged_def.get("attrs", {}) if not isinstance(child_attrs, dict): raise ValueError( - f"Subgroup '{bp_sub_name}' has non-dict 'attrs' inside blueprint '{blueprint_name}'." + f"Node '{bp_sub_name}' has non-dict 'attrs' inside blueprint '{blueprint_name}'." 
) merged_def["attrs"] = {**parent_attrs, **child_attrs} @@ -272,7 +269,7 @@ def _expand_group( merged_def["risk_groups"] = parent_risk_groups | child_rgs # Recursively expand - _expand_group( + _expand_node_group( ctx, parent_path=effective_path, group_name=bp_sub_name, @@ -280,27 +277,75 @@ def _expand_group( inherited_risk_groups=merged_def["risk_groups"], ) - # Defer blueprint adjacency under this parent's path to run after node overrides - for adj_def in bp.adjacency: - ctx.pending_bp_adj.append((adj_def, effective_path)) + # Defer blueprint links under this parent's path to run after node rules + for link_def in bp.links: + ctx.pending_bp_links.append((link_def, effective_path)) - else: - # Direct node group => recognized keys + elif "nodes" in group_def: + # Nested nodes => recognized keys _bp_parse.check_no_extra_keys( group_def, - allowed={"node_count", "name_template", "attrs", "disabled", "risk_groups"}, - context=f"group '{group_name}'", + allowed={"nodes", "attrs", "disabled", "risk_groups"}, + context=f"nested node '{group_name}'", ) - node_count = group_def.get("node_count", 1) - name_template = group_def.get("name_template", f"{group_name}-{{node_num}}") - if not isinstance(node_count, int) or node_count < 1: - raise ValueError( - f"group '{group_name}' has invalid node_count: {node_count}" + + parent_attrs = copy.deepcopy(group_def.get("attrs", {})) + parent_disabled = bool(group_def.get("disabled", False)) + + # Merge parent's risk_groups + parent_risk_groups = set(inherited_risk_groups) + if "risk_groups" in group_def: + rg_val = group_def["risk_groups"] + if not isinstance(rg_val, (list, set)): + raise ValueError( + f"'risk_groups' must be list or set in node '{group_name}'." + ) + parent_risk_groups |= expand_risk_group_refs(rg_val) + + # Recursively process nested nodes + nested_nodes = group_def["nodes"] + if not isinstance(nested_nodes, dict): + raise ValueError(f"'nodes' must be a dict in '{group_name}'.") + + for nested_name, nested_def in nested_nodes.items(): + if not isinstance(nested_def, dict): + raise ValueError( + f"Nested node definition for '{nested_name}' must be a dict." 
+                )
+            merged_def = dict(nested_def)
+
+            # Force disabled if parent is disabled
+            if parent_disabled:
+                merged_def["disabled"] = True
+
+            # Merge parent's attrs
+            child_attrs = merged_def.get("attrs", {})
+            if not isinstance(child_attrs, dict):
+                child_attrs = {}
+            merged_def["attrs"] = {**parent_attrs, **child_attrs}
+
+            # Merge parent's risk_groups with child's
+            child_rgs = expand_risk_group_refs(merged_def.get("risk_groups", []))
+            merged_def["risk_groups"] = parent_risk_groups | child_rgs
+
+            _expand_node_group(
+                ctx,
+                parent_path=effective_path,
+                group_name=nested_name,
+                group_def=merged_def,
+                inherited_risk_groups=merged_def["risk_groups"],
            )
+    else:
+        # Direct node group => recognized keys
+        _bp_parse.check_no_extra_keys(
+            group_def,
+            allowed={"count", "template", "attrs", "disabled", "risk_groups"},
+            context=f"node '{group_name}'",
+        )
         combined_attrs = copy.deepcopy(group_def.get("attrs", {}))
         if not isinstance(combined_attrs, dict):
-            raise ValueError(f"attrs must be a dict in group '{group_name}'.")
+            raise ValueError(f"attrs must be a dict in node '{group_name}'.")
         group_disabled = bool(group_def.get("disabled", False))
 
         # Merge parent's risk groups
@@ -308,22 +353,43 @@
         child_rgs = expand_risk_group_refs(group_def.get("risk_groups", []))
         final_risk_groups = parent_risk_groups | child_rgs
 
-        for i in range(1, node_count + 1):
-            label = name_template.format(node_num=i)
-            node_name = f"{effective_path}/{label}" if effective_path else label
+        # Check if this is a simple single node (no count, no template)
+        has_count = "count" in group_def
+        has_template = "template" in group_def
 
+        if not has_count and not has_template:
+            # Simple single node - use effective_path as the node name
             node = Node(
-                name=node_name,
+                name=effective_path,
                 disabled=group_disabled,
                 attrs=copy.deepcopy(combined_attrs),
             )
             node.attrs.setdefault("type", "node")
             node.risk_groups = final_risk_groups.copy()
             ctx.network.add_node(node)
+        else:
+            # Node group with count/template - create numbered nodes
+            count = group_def.get("count", 1)
+            template = group_def.get("template", f"{group_name}-{{n}}")
+            if not isinstance(count, int) or count < 1:
+                raise ValueError(f"node '{group_name}' has invalid count: {count}")
+
+            for i in range(1, count + 1):
+                label = template.format(n=i)
+                node_name = f"{effective_path}/{label}" if effective_path else label
+
+                node = Node(
+                    name=node_name,
+                    disabled=group_disabled,
+                    attrs=copy.deepcopy(combined_attrs),
+                )
+                node.attrs.setdefault("type", "node")
+                node.risk_groups = final_risk_groups.copy()
+                ctx.network.add_node(node)
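The direct-node branch above boils down to a small naming rule: no `count`/`template` means one node named by its path, otherwise numbered nodes under the path. A minimal, self-contained sketch of just that rule (plain Python; `preview_node_names` is a hypothetical helper, not code from this module):

```python
# Hypothetical helper mirroring the count/template naming rule above.
def preview_node_names(effective_path: str, group_name: str, group_def: dict) -> list[str]:
    if "count" not in group_def and "template" not in group_def:
        return [effective_path]  # simple single node: the path itself is the name
    count = group_def.get("count", 1)
    template = group_def.get("template", f"{group_name}-{{n}}")
    labels = [template.format(n=i) for i in range(1, count + 1)]
    return [f"{effective_path}/{lbl}" if effective_path else lbl for lbl in labels]

print(preview_node_names("dc1/leaf", "leaf", {"count": 2}))
# -> ['dc1/leaf/leaf-1', 'dc1/leaf/leaf-2']
print(preview_node_names("dc1/core", "core", {}))
# -> ['dc1/core']
```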
 
 
-def _normalize_adjacency_selector(sel: Any, base: str) -> Dict[str, Any]:
-    """Normalize a source/target selector for adjacency expansion.
+def _normalize_link_selector(sel: Any, base: str) -> Dict[str, Any]:
+    """Normalize a source/target selector for link expansion.
 
     Args:
         sel: String path or dict with 'path', 'group_by', and/or 'match'.
@@ -352,103 +418,93 @@
             out["path"] = _bp_parse.join_paths(base, path)
         return out
     raise ValueError(
-        "Adjacency 'source'/'target' must be string or object with "
+        "Link 'source'/'target' must be string or object with "
         "'path', 'group_by', or 'match'."
     )
 
 
-def _expand_blueprint_adjacency(
+def _expand_blueprint_link(
     ctx: DSLExpansionContext,
-    adj_def: Dict[str, Any],
+    link_def: Dict[str, Any],
     parent_path: str,
 ) -> None:
-    """Expands adjacency definitions from within a blueprint, using parent_path
-    as the local root. This also handles optional expand_vars for repeated adjacency.
-
-    Recognized adjacency keys:
-    {"source", "target", "pattern", "link_count", "link_params",
-    "expand_vars", "expansion_mode"}.
+    """Expands link definitions from within a blueprint, using parent_path
+    as the local root. Handles optional expand: block for repeated links.
 
     Args:
-        ctx (DSLExpansionContext): The context object with blueprint info and the network.
-        adj_def (Dict[str, Any]): The adjacency definition inside the blueprint.
-        parent_path (str): The path serving as the base for the blueprint's node paths.
+        ctx: The context object with blueprint info and the network.
+        link_def: The link definition inside the blueprint.
+        parent_path: The path serving as the base for the blueprint's node paths.
     """
-    _bp_parse.check_adjacency_keys(adj_def, context="blueprint adjacency")
-    expand_vars = adj_def.get("expand_vars", {})
-    if expand_vars:
-        _expand_adjacency_with_variables(ctx, adj_def, parent_path)
-        return
+    _bp_parse.check_link_keys(link_def, context="blueprint link")
 
-    source_rel = adj_def["source"]
-    target_rel = adj_def["target"]
-    pattern = adj_def.get("pattern", "mesh")
-    link_params = adj_def.get("link_params", {})
-    _bp_parse.check_link_params(link_params, context="blueprint adjacency")
-    link_count = adj_def.get("link_count", 1)
+    # Check for expand block
+    expand_spec = ExpansionSpec.from_dict(link_def)
+    if expand_spec and not expand_spec.is_empty():
+        _expand_link_with_variables(ctx, link_def, parent_path)
+        return
 
-    src_sel = _normalize_adjacency_selector(source_rel, parent_path)
-    tgt_sel = _normalize_adjacency_selector(target_rel, parent_path)
+    source_rel = link_def["source"]
+    target_rel = link_def["target"]
+    pattern = link_def.get("pattern", "mesh")
+    count = link_def.get("count", 1)
 
-    _expand_adjacency_pattern(ctx, src_sel, tgt_sel, pattern, link_params, link_count)
+    src_sel = _normalize_link_selector(source_rel, parent_path)
+    tgt_sel = _normalize_link_selector(target_rel, parent_path)
 
+    _expand_link_pattern(ctx, src_sel, tgt_sel, pattern, link_def, count)
 
-def _expand_adjacency(ctx: DSLExpansionContext, adj_def: Dict[str, Any]) -> None:
-    """Expands a top-level adjacency definition from 'network.adjacency'. If 'expand_vars'
-    is provided, we expand the source/target as templates repeatedly.
 
-    Recognized adjacency keys:
-    {"source", "target", "pattern", "link_count", "link_params",
-    "expand_vars", "expansion_mode"}.
+def _expand_link(ctx: DSLExpansionContext, link_def: Dict[str, Any]) -> None:
+    """Expands a top-level link definition from 'network.links'.
+    If expand: block is provided, we expand the source/target as templates.
 
     Args:
-        ctx (DSLExpansionContext): The context containing the target network.
-        adj_def (Dict[str, Any]): The adjacency definition dict.
+        ctx: The context containing the target network.
+        link_def: The link definition dict.
""" - _bp_parse.check_adjacency_keys(adj_def, context="top-level adjacency") - expand_vars = adj_def.get("expand_vars", {}) - if expand_vars: - _expand_adjacency_with_variables(ctx, adj_def, parent_path="") + _bp_parse.check_link_keys(link_def, context="top-level link") + + # Check for expand block + expand_spec = ExpansionSpec.from_dict(link_def) + if expand_spec and not expand_spec.is_empty(): + _expand_link_with_variables(ctx, link_def, parent_path="") return - source_raw = adj_def["source"] - target_raw = adj_def["target"] - pattern = adj_def.get("pattern", "mesh") - link_count = adj_def.get("link_count", 1) - link_params = adj_def.get("link_params", {}) - _bp_parse.check_link_params(link_params, context="top-level adjacency") + source_raw = link_def["source"] + target_raw = link_def["target"] + pattern = link_def.get("pattern", "mesh") + count = link_def.get("count", 1) - src_sel = _normalize_adjacency_selector(source_raw, "") - tgt_sel = _normalize_adjacency_selector(target_raw, "") + src_sel = _normalize_link_selector(source_raw, "") + tgt_sel = _normalize_link_selector(target_raw, "") - _expand_adjacency_pattern(ctx, src_sel, tgt_sel, pattern, link_params, link_count) + _expand_link_pattern(ctx, src_sel, tgt_sel, pattern, link_def, count) -def _expand_adjacency_with_variables( - ctx: DSLExpansionContext, adj_def: Dict[str, Any], parent_path: str +def _expand_link_with_variables( + ctx: DSLExpansionContext, link_def: Dict[str, Any], parent_path: str ) -> None: - """Handles adjacency expansions when 'expand_vars' is provided. + """Handles link expansions when 'expand' block is provided. Substitutes variables into 'source' and 'target' templates using $var or ${var} - syntax to produce multiple adjacency expansions. Supports both string paths + syntax to produce multiple link expansions. Supports both string paths and dict selectors (with path/group_by). Args: ctx: The DSL expansion context. - adj_def: The adjacency definition including expand_vars, source, target, etc. + link_def: The link definition including expand block, source, target, etc. parent_path: Prepended to source/target paths. 
""" - source_template = adj_def["source"] - target_template = adj_def["target"] - pattern = adj_def.get("pattern", "mesh") - link_params = adj_def.get("link_params", {}) - _bp_parse.check_link_params(link_params, context="adjacency with expand_vars") - link_count = adj_def.get("link_count", 1) - expand_vars = adj_def["expand_vars"] - expansion_mode = adj_def.get("expansion_mode", "cartesian") - - # Build expansion spec - spec = ExpansionSpec(expand_vars=expand_vars, expansion_mode=expansion_mode) + source_template = link_def["source"] + target_template = link_def["target"] + pattern = link_def.get("pattern", "mesh") + count = link_def.get("count", 1) + + # Get expansion spec from expand: block + expand_spec = ExpansionSpec.from_dict(link_def) + if expand_spec is None: + expand_spec = ExpansionSpec(vars={}, mode="cartesian") # Collect all string fields that need variable substitution templates = _extract_selector_templates(source_template, "source") @@ -456,20 +512,16 @@ def _expand_adjacency_with_variables( if not templates: # No variables to expand - just process once - src_sel = _normalize_adjacency_selector(source_template, parent_path) - tgt_sel = _normalize_adjacency_selector(target_template, parent_path) - _expand_adjacency_pattern( - ctx, src_sel, tgt_sel, pattern, link_params, link_count - ) + src_sel = _normalize_link_selector(source_template, parent_path) + tgt_sel = _normalize_link_selector(target_template, parent_path) + _expand_link_pattern(ctx, src_sel, tgt_sel, pattern, link_def, count) return # Expand templates and rebuild selectors - for substituted in expand_templates(templates, spec): + for substituted in expand_templates(templates, expand_spec): src_sel = _rebuild_selector(source_template, substituted, "source", parent_path) tgt_sel = _rebuild_selector(target_template, substituted, "target", parent_path) - _expand_adjacency_pattern( - ctx, src_sel, tgt_sel, pattern, link_params, link_count - ) + _expand_link_pattern(ctx, src_sel, tgt_sel, pattern, link_def, count) def _extract_selector_templates(selector: Any, prefix: str) -> Dict[str, str]: @@ -508,15 +560,15 @@ def _rebuild_selector( raise ValueError(f"Selector must be string or dict, got {type(original)}") -def _expand_adjacency_pattern( +def _expand_link_pattern( ctx: DSLExpansionContext, source_selector: Any, target_selector: Any, pattern: str, - link_params: Dict[str, Any], - link_count: int = 1, + link_def: Dict[str, Any], + count: int = 1, ) -> None: - """Generates Link objects for the chosen adjacency pattern among matched nodes. + """Generates Link objects for the chosen link pattern among matched nodes. Supported Patterns: * "mesh": Connect every source node to every target node @@ -525,19 +577,19 @@ def _expand_adjacency_pattern( using wrap-around. The larger set size must be a multiple of the smaller set size. - link_params must only contain recognized keys: capacity, cost, disabled, - risk_groups, attrs. + Link properties are now flat in link_def (capacity, cost, disabled, + risk_groups, attrs). Args: ctx: The context with the target network. source_selector: Path string or selector object {path, group_by, match}. target_selector: Path string or selector object {path, group_by, match}. pattern: "mesh" or "one_to_one". - link_params: Additional link parameters. - link_count: Number of parallel links to create for each adjacency. + link_def: Link definition with flat properties. + count: Number of parallel links to create for each pair. 
""" - source_nodes = _select_adjacency_nodes(ctx.network, source_selector) - target_nodes = _select_adjacency_nodes(ctx.network, target_selector) + source_nodes = _select_link_nodes(ctx.network, source_selector) + target_nodes = _select_link_nodes(ctx.network, target_selector) if not source_nodes or not target_nodes: return @@ -552,7 +604,7 @@ def _expand_adjacency_pattern( pair = (min(sn.name, tn.name), max(sn.name, tn.name)) if pair not in dedup_pairs: dedup_pairs.add(pair) - _create_link(ctx.network, sn.name, tn.name, link_params, link_count) + _create_link(ctx.network, sn.name, tn.name, link_def, count) elif pattern == "one_to_one": s_count = len(source_nodes) @@ -580,15 +632,15 @@ def _expand_adjacency_pattern( pair = (min(src_name, tgt_name), max(src_name, tgt_name)) if pair not in dedup_pairs: dedup_pairs.add(pair) - _create_link(ctx.network, src_name, tgt_name, link_params, link_count) + _create_link(ctx.network, src_name, tgt_name, link_def, count) else: - raise ValueError(f"Unknown adjacency pattern: {pattern}") + raise ValueError(f"Unknown link pattern: {pattern}") -def _select_adjacency_nodes(network: Network, selector: Any) -> List[Node]: - """Select nodes for adjacency based on selector. +def _select_link_nodes(network: Network, selector: Any) -> List[Node]: + """Select nodes for link creation based on selector. - Uses the unified selector system. For adjacency, active_only defaults + Uses the unified selector system. For links, active_only defaults to False (links to disabled nodes are created). Args: @@ -607,31 +659,27 @@ def _create_link( net: Network, source: str, target: str, - link_params: Dict[str, Any], - link_count: int = 1, + link_def: Dict[str, Any], + count: int = 1, ) -> None: - """Creates and adds one or more Links to the network, applying capacity, cost, - disabled, risk_groups, and attrs from link_params if present. + """Creates and adds one or more Links to the network. + + Link properties are now flat in link_def (capacity, cost, disabled, + risk_groups, attrs). Args: - net (Network): The network to which the new link(s) will be added. - source (str): Source node name for the link. - target (str): Target node name for the link. - link_params (Dict[str, Any]): Dict possibly containing - 'capacity', 'cost', 'disabled', 'risk_groups', 'attrs'. - link_count (int): Number of parallel links to create between source and target. + net: The network to which the new link(s) will be added. + source: Source node name for the link. + target: Target node name for the link. + link_def: Dict with flat link properties. + count: Number of parallel links to create between source and target. """ - _bp_parse.check_link_params( - link_params, context=f"creating link {source}->{target}" - ) - - for _ in range(link_count): - capacity = link_params.get("capacity", 1.0) - cost = link_params.get("cost", 1.0) - attrs = copy.deepcopy(link_params.get("attrs", {})) - disabled_flag = bool(link_params.get("disabled", False)) - # If link_params has risk_groups, we set them (replace). 
 
 
-def _select_adjacency_nodes(network: Network, selector: Any) -> List[Node]:
-    """Select nodes for adjacency based on selector.
+def _select_link_nodes(network: Network, selector: Any) -> List[Node]:
+    """Select nodes for link creation based on selector.
 
-    Uses the unified selector system. For adjacency, active_only defaults
+    Uses the unified selector system. For links, active_only defaults
     to False (links to disabled nodes are created).
 
     Args:
@@ -607,31 +659,27 @@ def _create_link(
     net: Network,
     source: str,
     target: str,
-    link_params: Dict[str, Any],
-    link_count: int = 1,
+    link_def: Dict[str, Any],
+    count: int = 1,
 ) -> None:
-    """Creates and adds one or more Links to the network, applying capacity, cost,
-    disabled, risk_groups, and attrs from link_params if present.
+    """Creates and adds one or more Links to the network.
+
+    Link properties are flat in link_def (capacity, cost, disabled,
+    risk_groups, attrs).
 
     Args:
-        net (Network): The network to which the new link(s) will be added.
-        source (str): Source node name for the link.
-        target (str): Target node name for the link.
-        link_params (Dict[str, Any]): Dict possibly containing
-            'capacity', 'cost', 'disabled', 'risk_groups', 'attrs'.
-        link_count (int): Number of parallel links to create between source and target.
+        net: The network to which the new link(s) will be added.
+        source: Source node name for the link.
+        target: Target node name for the link.
+        link_def: Dict with flat link properties.
+        count: Number of parallel links to create between source and target.
     """
-    _bp_parse.check_link_params(
-        link_params, context=f"creating link {source}->{target}"
-    )
-
-    for _ in range(link_count):
-        capacity = link_params.get("capacity", 1.0)
-        cost = link_params.get("cost", 1.0)
-        attrs = copy.deepcopy(link_params.get("attrs", {}))
-        disabled_flag = bool(link_params.get("disabled", False))
-        # If link_params has risk_groups, we set them (replace).
-        link_rgs = expand_risk_group_refs(link_params.get("risk_groups", []))
+    for _ in range(count):
+        capacity = link_def.get("capacity", 1.0)
+        cost = link_def.get("cost", 1.0)
+        attrs = copy.deepcopy(link_def.get("attrs", {}))
+        disabled_flag = bool(link_def.get("disabled", False))
+        link_rgs = expand_risk_group_refs(link_def.get("risk_groups", []))
 
         link = Link(
             source=source,
@@ -645,200 +693,143 @@
         net.add_link(link)
 
 
-def _process_direct_nodes(net: Network, network_data: Dict[str, Any]) -> None:
-    """Processes direct node definitions (network_data["nodes"]) and adds them to the network
-    if they do not already exist. If the node name already exists, we do nothing.
-
-    Allowed top-level keys for each node: {"disabled", "attrs", "risk_groups"}.
-    Everything else must be placed inside "attrs" or it triggers an error.
-
-    Args:
-        net (Network): The network to which nodes are added.
-        network_data (Dict[str, Any]): DSL data possibly containing a "nodes" dict.
-    """
-    nodes_dict = network_data.get("nodes", {})
-    if not isinstance(nodes_dict, dict):
-        raise ValueError("'nodes' must be a mapping (dict) if present.")
-
-    for node_name, raw_def in nodes_dict.items():
-        if not isinstance(raw_def, dict):
-            raise ValueError(f"Node definition for '{node_name}' must be a dict.")
-        _bp_parse.check_no_extra_keys(
-            raw_def,
-            allowed={"disabled", "attrs", "risk_groups"},
-            context=f"node '{node_name}'",
-        )
-
-        if node_name not in net.nodes:
-            disabled_flag = bool(raw_def.get("disabled", False))
-            attrs_dict = raw_def.get("attrs", {})
-            if not isinstance(attrs_dict, dict):
-                raise ValueError(f"'attrs' must be a dict in node '{node_name}'.")
-            # risk_groups => set them if provided (with bracket expansion)
-            rgs = expand_risk_group_refs(raw_def.get("risk_groups", []))
-
-            new_node = Node(
-                name=node_name,
-                disabled=disabled_flag,
-                attrs=copy.deepcopy(attrs_dict),
-            )
-            new_node.attrs.setdefault("type", "node")
-            new_node.risk_groups = rgs
-            net.add_node(new_node)
-
+def _process_node_rules(net: Network, network_data: Dict[str, Any]) -> None:
+    """Processes the 'node_rules' section of the network DSL, updating
+    existing nodes with new attributes in bulk. Rules are applied in order
+    if multiple items match the same node.
 
-def _process_direct_links(net: Network, network_data: Dict[str, Any]) -> None:
-    """Processes direct link definitions (network_data["links"]) and adds them to the network.
+    Each rule may contain {"path", "match", "attrs", "disabled", "risk_groups",
+    "expand"}; "path" defaults to ".*".
 
-    Each link dict must contain {"source", "target"} plus optionally
-    {"link_params", "link_count"}. No other top-level keys allowed.
-    link_params must obey the recognized link_params format (including optional risk_groups).
+    - If "disabled" is present, we set node.disabled.
+    - If "risk_groups" is present, we *replace* the node's risk_groups.
+    - Everything else merges into node.attrs.
 
     Args:
-        net (Network): The network to which links are added.
-        network_data (Dict[str, Any]): DSL data possibly containing a "links" list.
+        net: The Network whose nodes will be updated.
+        network_data: DSL data possibly containing 'node_rules'.
""" - links_list = network_data.get("links", []) - if not isinstance(links_list, list): - raise ValueError("'links' must be a list if present.") + node_rules = network_data.get("node_rules", []) + if not isinstance(node_rules, list): + return - for link_info in links_list: - if not isinstance(link_info, dict): - raise ValueError("Each link definition must be a dictionary.") + for rule in node_rules: + if not isinstance(rule, dict): + raise ValueError("Each node_rule must be a dict.") _bp_parse.check_no_extra_keys( - link_info, - allowed={"source", "target", "link_params", "link_count"}, - context="direct link", + rule, + allowed={"path", "attrs", "disabled", "risk_groups", "match", "expand"}, + context="node rule", ) - if "source" not in link_info or "target" not in link_info: - raise ValueError("Each link definition must include 'source' and 'target'.") - - source = link_info["source"] - target = link_info["target"] - if source not in net.nodes or target not in net.nodes: - raise ValueError(f"Link references unknown node(s): {source}, {target}.") - if source == target: - raise ValueError(f"Link cannot have the same source and target: {source}") - - link_params = link_info.get("link_params", {}) - if not isinstance(link_params, dict): - raise ValueError(f"link_params must be a dict for link {source}->{target}.") - link_count = link_info.get("link_count", 1) - if not isinstance(link_count, int) or link_count < 1: - raise ValueError( - f"Invalid link_count={link_count} for link {source}->{target}." - ) - - _create_link(net, source, target, link_params, link_count) - - -def _process_link_overrides(net: Network, network_data: Dict[str, Any]) -> None: - """Processes the 'link_overrides' section of the network DSL, updating - existing links with new parameters. Overrides are applied in order if - multiple items match the same link. + # Handle expand block + expand_spec = ExpansionSpec.from_dict(rule) + if expand_spec and not expand_spec.is_empty(): + for expanded_rule in expand_block(rule, expand_spec): + _apply_node_rule(net, expanded_rule) + else: + _apply_node_rule(net, rule) - Each override must contain {"source", "target", "link_params"} plus - optionally {"any_direction"}. link_params must obey recognized fields - (capacity, cost, disabled, risk_groups, attrs). - If link_params["risk_groups"] is given, it *replaces* the link's existing risk_groups. +def _apply_node_rule(net: Network, rule: Dict[str, Any]) -> None: + """Apply a single node rule to matching nodes.""" + path = rule.get("path", ".*") + match_spec = rule.get("match") + top_level_disabled = rule.get("disabled", None) + rule_risk_groups = rule.get("risk_groups", None) - Args: - net (Network): The Network whose links will be updated. - network_data (Dict[str, Any]): DSL data possibly containing 'link_overrides'. 
- """ - link_overrides = network_data.get("link_overrides", []) - if not isinstance(link_overrides, list): - return - - for link_override in link_overrides: - if not isinstance(link_override, dict): - raise ValueError("Each link_override must be a dict.") - _bp_parse.check_no_extra_keys( - link_override, - allowed={"source", "target", "link_params", "any_direction"}, - context="link override", - ) - source = link_override["source"] - target = link_override["target"] - link_params = link_override["link_params"] - if not isinstance(link_params, dict): - raise ValueError("link_params must be dict in link override.") - any_direction = link_override.get("any_direction", True) + if "attrs" in rule and not isinstance(rule["attrs"], dict): + raise ValueError("attrs must be a dict in node rule.") - _update_links(net, source, target, link_params, any_direction) + attrs_to_set = copy.deepcopy(rule.get("attrs", {})) + _update_nodes( + net, path, match_spec, attrs_to_set, top_level_disabled, rule_risk_groups + ) -def _process_node_overrides(net: Network, network_data: Dict[str, Any]) -> None: - """Processes the 'node_overrides' section of the network DSL, updating - existing nodes with new attributes in bulk. Overrides are applied in order - if multiple items match the same node. +def _process_link_rules(net: Network, network_data: Dict[str, Any]) -> None: + """Processes the 'link_rules' section of the network DSL, updating + existing links with new parameters. Rules are applied in order if + multiple items match the same link. - Each override must have {"path"} plus optionally {"attrs", "disabled", "risk_groups"}. + Each rule must contain {"source", "target"} plus optionally + {"bidirectional", "capacity", "cost", "disabled", "risk_groups", "attrs", "expand"}. - - If "disabled" is present at top level, we set node.disabled. - - If "risk_groups" is present, we *replace* the node's risk_groups. - - Everything else merges into node.attrs. + If risk_groups is given, it *replaces* the link's existing risk_groups. Args: - net (Network): The Network whose nodes will be updated. - network_data (Dict[str, Any]): DSL data possibly containing 'node_overrides'. + net: The Network whose links will be updated. + network_data: DSL data possibly containing 'link_rules'. 
""" - node_overrides = network_data.get("node_overrides", []) - if not isinstance(node_overrides, list): + link_rules = network_data.get("link_rules", []) + if not isinstance(link_rules, list): return - for override in node_overrides: - if not isinstance(override, dict): - raise ValueError("Each node_override must be a dict.") + for link_rule in link_rules: + if not isinstance(link_rule, dict): + raise ValueError("Each link_rule must be a dict.") _bp_parse.check_no_extra_keys( - override, - allowed={"path", "attrs", "disabled", "risk_groups"}, - context="node override", + link_rule, + allowed={ + "source", + "target", + "bidirectional", + "capacity", + "cost", + "disabled", + "risk_groups", + "attrs", + "expand", + "link_match", + }, + context="link rule", ) - path = override["path"] - top_level_disabled = override.get("disabled", None) - override_risk_groups = override.get("risk_groups", None) + # Handle expand block + expand_spec = ExpansionSpec.from_dict(link_rule) + if expand_spec and not expand_spec.is_empty(): + for expanded_rule in expand_block(link_rule, expand_spec): + _apply_link_rule(net, expanded_rule) + else: + _apply_link_rule(net, link_rule) - if "attrs" in override and not isinstance(override["attrs"], dict): - raise ValueError("attrs must be a dict in node override.") - attrs_to_set = copy.deepcopy(override.get("attrs", {})) +def _apply_link_rule(net: Network, rule: Dict[str, Any]) -> None: + """Apply a single link rule to matching links.""" + source = rule["source"] + target = rule["target"] + bidirectional = rule.get("bidirectional", True) - # We'll process disabled as a separate boolean - # We'll process risk_groups as a direct replacement if given - _update_nodes(net, path, attrs_to_set, top_level_disabled, override_risk_groups) + _update_links(net, source, target, rule, bidirectional) def _update_links( net: Network, - source: str, - target: str, - link_params: Dict[str, Any], - any_direction: bool = True, + source: Any, + target: Any, + rule: Dict[str, Any], + bidirectional: bool = True, ) -> None: - """Updates all Link objects between nodes matching 'source' and 'target' paths + """Updates all Link objects between nodes matching source and target selectors with new parameters (capacity, cost, disabled, risk_groups, attrs). - If any_direction=True, both (source->target) and (target->source) links + If bidirectional=True, both (source->target) and (target->source) links are updated if present. - If link_params["risk_groups"] is given, it *replaces* the link's existing risk_groups. + If risk_groups is given, it *replaces* the link's existing risk_groups. Args: - net (Network): The network whose links should be updated. - source (str): A path pattern identifying source node group(s). - target (str): A path pattern identifying target node group(s). - link_params (Dict[str, Any]): New parameter values - (capacity, cost, disabled, risk_groups, attrs). - any_direction (bool): If True, also update reversed direction links. + net: The network whose links should be updated. + source: Selector (string path or dict with path/match) for source nodes. + target: Selector (string path or dict with path/match) for target nodes. + rule: Rule dict with flat link properties. + bidirectional: If True, also update reversed direction links. 
""" - _bp_parse.check_link_params(link_params, context="link override processing") + # Use unified selector system for full selector support + src_sel = normalize_selector(source, context="override") + tgt_sel = normalize_selector(target, context="override") - source_node_groups = net.select_node_groups_by_path(source) - target_node_groups = net.select_node_groups_by_path(target) + source_node_groups = select_nodes(net, src_sel, default_active_only=False) + target_node_groups = select_nodes(net, tgt_sel, default_active_only=False) source_nodes = { node.name for _, nodes in source_node_groups.items() for node in nodes @@ -847,53 +838,78 @@ def _update_links( node.name for _, nodes in target_node_groups.items() for node in nodes } - new_disabled_val = link_params.get("disabled", None) - new_capacity = link_params.get("capacity", None) - new_cost = link_params.get("cost", None) - new_risk_groups = link_params.get("risk_groups", None) - new_attrs = link_params.get("attrs", {}) + new_disabled_val = rule.get("disabled", None) + new_capacity = rule.get("capacity", None) + new_cost = rule.get("cost", None) + new_risk_groups = rule.get("risk_groups", None) + new_attrs = rule.get("attrs", {}) + + # Parse link_match for filtering by link attributes + link_match_raw = rule.get("link_match") + link_match = parse_match_spec(link_match_raw) if link_match_raw else None - for link in net.links.values(): + for link_id, link in net.links.items(): forward_match = link.source in source_nodes and link.target in target_nodes reverse_match = ( - any_direction + bidirectional and link.source in target_nodes and link.target in source_nodes ) - if forward_match or reverse_match: - if new_capacity is not None: - link.capacity = new_capacity - if new_cost is not None: - link.cost = new_cost - if new_disabled_val is not None: - link.disabled = bool(new_disabled_val) - if new_risk_groups is not None: - link.risk_groups = expand_risk_group_refs(new_risk_groups) - if new_attrs: - link.attrs.update(new_attrs) + if not (forward_match or reverse_match): + continue + + # Apply link_match filter if specified + if link_match is not None: + link_attrs = flatten_link_attrs(link, link_id) + if not evaluate_conditions( + link_attrs, link_match.conditions, link_match.logic + ): + continue + + # Apply updates + if new_capacity is not None: + link.capacity = new_capacity + if new_cost is not None: + link.cost = new_cost + if new_disabled_val is not None: + link.disabled = bool(new_disabled_val) + if new_risk_groups is not None: + link.risk_groups = expand_risk_group_refs(new_risk_groups) + if new_attrs: + link.attrs.update(new_attrs) def _update_nodes( net: Network, path: str, + match_spec: Optional[Dict[str, Any]], attrs: Dict[str, Any], disabled_val: Any = None, risk_groups_val: Any = None, ) -> None: - """Updates attributes on all nodes matching a given path pattern. + """Updates attributes on all nodes matching a path pattern and optional match conditions. - If 'disabled_val' is not None, sets node.disabled to that boolean value. - If 'risk_groups_val' is not None, *replaces* the node's risk_groups with that new set. - Everything else in 'attrs' is merged into node.attrs. Args: - net (Network): The network containing the nodes. - path (str): A path pattern identifying which node group(s) to modify. - attrs (Dict[str, Any]): A dictionary of new attributes to set/merge. - disabled_val (Any): Boolean or None for disabling or enabling nodes. - risk_groups_val (Any): List or set or None for replacing node.risk_groups. 
+        net: The network containing the nodes.
+        path: A path pattern identifying which node group(s) to modify.
+        match_spec: Optional match conditions dict (with 'conditions' and 'logic').
+        attrs: A dictionary of new attributes to set/merge.
+        disabled_val: Boolean or None for disabling or enabling nodes.
+        risk_groups_val: List or set or None for replacing node.risk_groups.
     """
-    node_groups = net.select_node_groups_by_path(path)
+    # Build selector dict with path and optional match
+    selector_dict: Dict[str, Any] = {"path": path}
+    if match_spec:
+        selector_dict["match"] = match_spec
+
+    # Use unified selector system
+    normalized = normalize_selector(selector_dict, context="override")
+    node_groups = select_nodes(net, normalized, default_active_only=False)
+
     for _, nodes in node_groups.items():
         for node in nodes:
             if disabled_val is not None:
diff --git a/ngraph/dsl/blueprints/parser.py b/ngraph/dsl/blueprints/parser.py
index 5c271c9..9d1e5a1 100644
--- a/ngraph/dsl/blueprints/parser.py
+++ b/ngraph/dsl/blueprints/parser.py
@@ -13,8 +13,7 @@
 __all__ = [
     "check_no_extra_keys",
-    "check_adjacency_keys",
-    "check_link_params",
+    "check_link_keys",
     "expand_name_patterns",
     "join_paths",
 ]
@@ -38,38 +37,26 @@
     )
 
 
-def check_adjacency_keys(adj_def: Dict[str, Any], context: str) -> None:
-    """Ensure adjacency definitions only contain recognized keys."""
+def check_link_keys(link_def: Dict[str, Any], context: str) -> None:
+    """Ensure link definitions only contain recognized keys."""
     check_no_extra_keys(
-        adj_def,
+        link_def,
         allowed={
             "source",
             "target",
             "pattern",
-            "link_count",
-            "link_params",
-            "expand_vars",
-            "expansion_mode",
+            "count",
+            "expand",
+            "capacity",
+            "cost",
+            "disabled",
+            "risk_groups",
+            "attrs",
         },
         context=context,
     )
-    if "source" not in adj_def or "target" not in adj_def:
-        raise ValueError(f"Adjacency in {context} must have 'source' and 'target'.")
-
-
-def check_link_params(link_params: Dict[str, Any], context: str) -> None:
-    """Ensure link_params contain only recognized keys.
-
-    Link attributes may include "hardware" per-end mapping when set under
-    link_params.attrs. This function only validates top-level link_params keys.
-    """
-    recognized = {"capacity", "cost", "disabled", "risk_groups", "attrs"}
-    extra = set(link_params.keys()) - recognized
-    if extra:
-        raise ValueError(
-            f"Unrecognized link_params key(s) in {context}: {', '.join(sorted(extra))}. "
-            f"Allowed: {sorted(recognized)}"
-        )
+    if "source" not in link_def or "target" not in link_def:
+        raise ValueError(f"Link in {context} must have 'source' and 'target'.")
 
 
 def join_paths(parent_path: str, rel_path: str) -> str:
diff --git a/ngraph/dsl/expansion/__init__.py b/ngraph/dsl/expansion/__init__.py
index b6238c6..220b480 100644
--- a/ngraph/dsl/expansion/__init__.py
+++ b/ngraph/dsl/expansion/__init__.py
@@ -4,11 +4,11 @@
 bracket pattern expansion for name generation.
 
 Usage:
-    from ngraph.dsl.expansion import expand_templates, expand_name_patterns, ExpansionSpec
+    from ngraph.dsl.expansion import expand_block, expand_name_patterns, ExpansionSpec
 
-    # Variable expansion
-    spec = ExpansionSpec(expand_vars={"dc": [1, 2, 3]})
-    for result in expand_templates({"path": "dc${dc}/leaf"}, spec):
+    # Variable expansion with expand: block
+    spec = ExpansionSpec.from_dict({"expand": {"vars": {"dc": [1, 2, 3]}}})
+    for result in expand_block({"path": "dc${dc}/leaf"}, spec):
         print(result)
         # {"path": "dc1/leaf"}, {"path": "dc2/leaf"}, ...
 
     # Bracket expansion
@@ -17,13 +17,14 @@
 
 from .brackets import expand_name_patterns, expand_risk_group_refs
 from .schema import ExpansionSpec
-from .variables import expand_templates, substitute_vars
+from .variables import expand_block, expand_templates, substitute_vars
 
 __all__ = [
     # Schema
     "ExpansionSpec",
     # Variable expansion
     "expand_templates",
+    "expand_block",
     "substitute_vars",
     # Bracket expansion
     "expand_name_patterns",
diff --git a/ngraph/dsl/expansion/schema.py b/ngraph/dsl/expansion/schema.py
index 28bb684..776ebd9 100644
--- a/ngraph/dsl/expansion/schema.py
+++ b/ngraph/dsl/expansion/schema.py
@@ -6,7 +6,7 @@
 from __future__ import annotations
 
 from dataclasses import dataclass, field
-from typing import Any, Dict, List, Literal
+from typing import Any, Dict, List, Literal, Optional
 
 
 @dataclass
@@ -14,15 +14,33 @@
 class ExpansionSpec:
     """Specification for variable-based expansion.
 
     Attributes:
-        expand_vars: Mapping of variable names to lists of values.
-        expansion_mode: How to combine variable values.
+        vars: Mapping of variable names to lists of values.
+        mode: How to combine variable values.
             - "cartesian": All combinations (default)
             - "zip": Pair values by position
     """
 
-    expand_vars: Dict[str, List[Any]] = field(default_factory=dict)
-    expansion_mode: Literal["cartesian", "zip"] = "cartesian"
+    vars: Dict[str, List[Any]] = field(default_factory=dict)
+    mode: Literal["cartesian", "zip"] = "cartesian"
 
     def is_empty(self) -> bool:
         """Check if no variables are defined."""
-        return not self.expand_vars
+        return not self.vars
+
+    @classmethod
+    def from_dict(cls, data: Dict[str, Any]) -> Optional["ExpansionSpec"]:
+        """Extract expand: block from dict.
+
+        Args:
+            data: Dict that may contain an 'expand' key.
+
+        Returns:
+            ExpansionSpec if 'expand' block present, None otherwise.
+        """
+        if "expand" not in data:
+            return None
+        expand = data["expand"]
+        return cls(
+            vars=expand.get("vars", {}),
+            mode=expand.get("mode", "cartesian"),
+        )
diff --git a/ngraph/dsl/expansion/variables.py b/ngraph/dsl/expansion/variables.py
index d47ed36..b606ed1 100644
--- a/ngraph/dsl/expansion/variables.py
+++ b/ngraph/dsl/expansion/variables.py
@@ -1,14 +1,15 @@
 """Variable expansion for templates.
 
-Provides expand_templates() function for substituting $var and ${var}
-placeholders in template strings.
+Provides substitution of $var and ${var} placeholders in strings,
+with recursive substitution in nested structures.
 """
 
 from __future__ import annotations
 
+import copy
 import re
 from itertools import product
-from typing import TYPE_CHECKING, Any, Dict, Iterator
+from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional
 
 if TYPE_CHECKING:
     from .schema import ExpansionSpec
@@ -16,6 +17,7 @@
 __all__ = [
     "expand_templates",
     "substitute_vars",
+    "expand_block",
 ]
 
 # Pattern to match $var or ${var} placeholders
@@ -25,11 +27,9 @@
 MAX_TEMPLATE_EXPANSIONS = 10_000
 
 
-def substitute_vars(template: str, var_dict: Dict[str, Any]) -> str:
+def _substitute_string(template: str, var_dict: Dict[str, Any]) -> str:
     """Substitute $var and ${var} placeholders in a template string.
 
-    Uses $ prefix to avoid collision with regex {m,n} quantifiers.
-
     Args:
         template: String containing $var or ${var} placeholders.
         var_dict: Mapping of variable names to values.
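A minimal check of `ExpansionSpec.from_dict` as defined above — a sketch, assuming the import path shown in the diff headers:

```python
from ngraph.dsl.expansion import ExpansionSpec

spec = ExpansionSpec.from_dict({"expand": {"vars": {"dc": [1, 2]}, "mode": "zip"}})
assert spec is not None and spec.vars == {"dc": [1, 2]} and spec.mode == "zip"

# Dicts without an expand: key yield None, so callers can skip expansion.
assert ExpansionSpec.from_dict({"source": "a", "target": "b"}) is None
```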
@@ -44,44 +44,51 @@
     def replace(match: re.Match[str]) -> str:
         var_name = match.group(1) or match.group(2)
         if var_name not in var_dict:
-            raise KeyError(f"Variable '${var_name}' not found in expand_vars")
+            raise KeyError(f"Variable '${var_name}' not found in expand.vars")
         return str(var_dict[var_name])
 
     return _VAR_PATTERN.sub(replace, template)
 
 
-def expand_templates(
-    templates: Dict[str, str],
-    spec: "ExpansionSpec",
-) -> Iterator[Dict[str, str]]:
-    """Expand template strings with variable substitution.
-
-    Uses $var or ${var} syntax only.
+def substitute_vars(obj: Any, var_dict: Dict[str, Any]) -> Any:
+    """Recursively substitute ${var} in all strings within obj.
 
     Args:
-        templates: Dict of template strings, e.g. {"source": "dc${dc}/...", "sink": "..."}.
-        spec: Expansion specification with variables and mode.
+        obj: Any value (string, dict, list, or primitive).
+        var_dict: Mapping of variable names to values.
 
-    Yields:
-        Dicts with same keys as templates, values substituted.
+    Returns:
+        Object with all string values having variables substituted.
+    """
+    if isinstance(obj, str):
+        return _substitute_string(obj, var_dict)
+    if isinstance(obj, dict):
+        return {k: substitute_vars(v, var_dict) for k, v in obj.items()}
+    if isinstance(obj, list):
+        return [substitute_vars(item, var_dict) for item in obj]
+    return obj
 
-    Raises:
-        ValueError: If zip mode has mismatched list lengths or expansion exceeds limit.
-        KeyError: If a template references an undefined variable.
 
-    Example:
-        >>> spec = ExpansionSpec(expand_vars={"dc": [1, 2]})
-        >>> list(expand_templates({"src": "dc${dc}"}, spec))
-        [{"src": "dc1"}, {"src": "dc2"}]
+def _generate_combinations(
+    vars_dict: Dict[str, List[Any]],
+    mode: str,
+) -> Iterator[Dict[str, Any]]:
+    """Generate variable value combinations.
+
+    Args:
+        vars_dict: Mapping of variable names to value lists.
+        mode: "cartesian" or "zip".
+
+    Yields:
+        Dict mapping variable names to values for each combination.
     """
-    if spec.is_empty():
-        yield templates
+    if not vars_dict:
         return
 
-    var_names = sorted(spec.expand_vars.keys())
-    var_values = [spec.expand_vars[k] for k in var_names]
+    var_names = sorted(vars_dict.keys())
+    var_values = [vars_dict[k] for k in var_names]
 
-    if spec.expansion_mode == "zip":
+    if mode == "zip":
         lengths = [len(v) for v in var_values]
         if len(set(lengths)) != 1:
             raise ValueError(
@@ -104,5 +111,59 @@
             )
 
     for combo in combos:
-        var_dict = dict(zip(var_names, combo, strict=True))
-        yield {k: substitute_vars(v, var_dict) for k, v in templates.items()}
+        yield dict(zip(var_names, combo, strict=True))
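The recursive `substitute_vars` above reaches strings nested inside dicts and lists, which is what lets whole selector objects be templated. A short sketch (import path assumed from the diff headers):

```python
from ngraph.dsl.expansion import substitute_vars

sel = {
    "path": "dc${dc}/leaf",
    "match": {"conditions": [{"attr": "pod", "op": "==", "value": "pod${dc}"}]},
}
print(substitute_vars(sel, {"dc": 3}))
# -> {'path': 'dc3/leaf',
#     'match': {'conditions': [{'attr': 'pod', 'op': '==', 'value': 'pod3'}]}}
```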
+ """ + if spec is None or spec.is_empty(): + yield block + return + + for var_dict in _generate_combinations(spec.vars, spec.mode): + expanded = copy.deepcopy(block) + # Remove the expand block from the result + expanded.pop("expand", None) + yield substitute_vars(expanded, var_dict) + + +def expand_templates( + templates: Dict[str, str], + spec: "ExpansionSpec", +) -> Iterator[Dict[str, str]]: + """Expand template strings with variable substitution. + + Uses $var or ${var} syntax only. + + Args: + templates: Dict of template strings. + spec: Expansion specification with variables and mode. + + Yields: + Dicts with same keys as templates, values substituted. + + Raises: + ValueError: If zip mode has mismatched list lengths or expansion exceeds limit. + KeyError: If a template references an undefined variable. + """ + if spec.is_empty(): + yield templates + return + + for var_dict in _generate_combinations(spec.vars, spec.mode): + yield {k: _substitute_string(v, var_dict) for k, v in templates.items()} diff --git a/ngraph/dsl/loader.py b/ngraph/dsl/loader.py index 31f7960..3fde49c 100644 --- a/ngraph/dsl/loader.py +++ b/ngraph/dsl/loader.py @@ -30,9 +30,9 @@ def load_scenario_yaml(yaml_str: str) -> Dict[str, Any]: raise ValueError("The provided YAML must map to a dictionary at top-level.") # Normalize known sections that suffer from YAML key ambiguities - if isinstance(data.get("traffic_matrix_set"), dict): - data["traffic_matrix_set"] = normalize_yaml_dict_keys( - data["traffic_matrix_set"] # type: ignore[arg-type] + if isinstance(data.get("demands"), dict): + data["demands"] = normalize_yaml_dict_keys( + data["demands"] # type: ignore[arg-type] ) # Early shape checks helpful for better error messages prior to schema validation @@ -56,15 +56,6 @@ def load_scenario_yaml(yaml_str: str) -> Dict[str, Any]: raise ValueError( "Each link definition must include 'source' and 'target'" ) - if isinstance(network_section.get("nodes"), dict): - for _node_name, node_def in network_section["nodes"].items(): - if isinstance(node_def, dict): - allowed = {"attrs", "disabled", "risk_groups"} - for k in node_def.keys(): - if k not in allowed: - raise ValueError( - f"Unrecognized key '{k}' in node '{_node_name}'" - ) if isinstance(data.get("risk_groups"), list): for rg in data["risk_groups"]: @@ -106,12 +97,12 @@ def load_scenario_yaml(yaml_str: str) -> Dict[str, Any]: recognized_keys = { "vars", "blueprints", - "network", - "failure_policy_set", - "traffic_matrix_set", - "workflow", "components", + "network", "risk_groups", + "demands", + "failures", + "workflow", "seed", } extra = set(data.keys()) - recognized_keys diff --git a/ngraph/dsl/selectors/conditions.py b/ngraph/dsl/selectors/conditions.py index ce03c2c..e13746a 100644 --- a/ngraph/dsl/selectors/conditions.py +++ b/ngraph/dsl/selectors/conditions.py @@ -2,7 +2,7 @@ Provides evaluation logic for attribute conditions used in selectors and failure policies. Supports operators: ==, !=, <, <=, >, >=, -contains, not_contains, in, not_in, any_value, no_value. +contains, not_contains, in, not_in, exists, not_exists. Supports dot-notation for nested attribute access (e.g., "hardware.vendor"). """ @@ -70,13 +70,13 @@ def evaluate_condition(attrs: Dict[str, Any], cond: "Condition") -> bool: ValueError: If operator is unknown or value type is invalid. 
""" has_attr, attr_value = resolve_attr_path(attrs, cond.attr) - op = cond.operator + op = cond.op expected = cond.value # Existence operators - if op == "any_value": + if op == "exists": return has_attr and attr_value is not None - if op == "no_value": + if op == "not_exists": return (not has_attr) or (attr_value is None) # For all other operators, missing/None attribute means no match diff --git a/ngraph/dsl/selectors/normalize.py b/ngraph/dsl/selectors/normalize.py index 3e8499d..d1a305b 100644 --- a/ngraph/dsl/selectors/normalize.py +++ b/ngraph/dsl/selectors/normalize.py @@ -130,13 +130,13 @@ def parse_match_spec( raise ValueError( f"Condition in {context} must be a dict, got {type(cond_dict).__name__}" ) - if "attr" not in cond_dict or "operator" not in cond_dict: - raise ValueError(f"Condition in {context} must have 'attr' and 'operator'") + if "attr" not in cond_dict or "op" not in cond_dict: + raise ValueError(f"Condition in {context} must have 'attr' and 'op'") conditions.append( Condition( attr=cond_dict["attr"], - operator=cond_dict["operator"], + op=cond_dict["op"], value=cond_dict.get("value"), ) ) diff --git a/ngraph/dsl/selectors/schema.py b/ngraph/dsl/selectors/schema.py index 7c400d7..69924a0 100644 --- a/ngraph/dsl/selectors/schema.py +++ b/ngraph/dsl/selectors/schema.py @@ -1,7 +1,7 @@ """Schema definitions for unified node selection. Provides dataclasses for node selection configuration used across -adjacency, demands, overrides, and workflow steps. +network rules, demands, and workflow steps. """ from __future__ import annotations @@ -27,8 +27,8 @@ "not_contains", "in", "not_in", - "any_value", - "no_value", + "exists", + "not_exists", } ) @@ -42,12 +42,12 @@ class Condition: Attributes: attr: Attribute name to match (supports dot-notation for nested attrs). - operator: Comparison operator. - value: Right-hand operand (unused for any_value/no_value). + op: Comparison operator. + value: Right-hand operand (unused for exists/not_exists). """ attr: str - operator: Literal[ + op: Literal[ "==", "!=", "<", @@ -58,15 +58,15 @@ class Condition: "not_contains", "in", "not_in", - "any_value", - "no_value", + "exists", + "not_exists", ] value: Any = None def __post_init__(self) -> None: - if self.operator not in VALID_OPERATORS: + if self.op not in VALID_OPERATORS: raise ValueError( - f"Invalid operator '{self.operator}'. " + f"Invalid operator '{self.op}'. " f"Valid operators: {sorted(VALID_OPERATORS)}" ) diff --git a/ngraph/dsl/selectors/select.py b/ngraph/dsl/selectors/select.py index 76db444..f25535a 100644 --- a/ngraph/dsl/selectors/select.py +++ b/ngraph/dsl/selectors/select.py @@ -247,12 +247,16 @@ def _group_by_attribute( ) -> Dict[str, List["Node"]]: """Re-group nodes by attribute value. + Uses flatten_node_attrs to support both top-level fields (name, disabled, + risk_groups) and custom attrs, consistent with match condition evaluation. + Note: This discards any existing grouping (including regex captures). """ result: Dict[str, List["Node"]] = {} for nodes in groups.values(): for node in nodes: - if attr_name in node.attrs: - key = str(node.attrs[attr_name]) + flat_attrs = flatten_node_attrs(node) + if attr_name in flat_attrs: + key = str(flat_attrs[attr_name]) result.setdefault(key, []).append(node) return result diff --git a/ngraph/model/__init__.py b/ngraph/model/__init__.py index e63d522..99a6c29 100644 --- a/ngraph/model/__init__.py +++ b/ngraph/model/__init__.py @@ -5,7 +5,7 @@ for analysis are handled via node_mask and edge_mask parameters in Core algorithms. 
""" -from ngraph.model.demand import TrafficDemand, TrafficMatrixSet +from ngraph.model.demand import TrafficDemand from ngraph.model.flow import FlowPolicyPreset from ngraph.model.network import Link, Network, Node, RiskGroup from ngraph.model.path import Path @@ -19,7 +19,6 @@ "Path", # Traffic demands "TrafficDemand", - "TrafficMatrixSet", # Flow configuration "FlowPolicyPreset", ] diff --git a/ngraph/model/demand/__init__.py b/ngraph/model/demand/__init__.py index 97a58fc..2decb9c 100644 --- a/ngraph/model/demand/__init__.py +++ b/ngraph/model/demand/__init__.py @@ -1,20 +1,20 @@ -"""Traffic demand specification and matrix containers. +"""Traffic demand specification and set containers. This package provides data structures for defining traffic demands -and organizing them into named traffic matrix sets. +and organizing them into named demand sets. Public API: - TrafficDemand: Individual demand specification with source/sink selectors - TrafficMatrixSet: Named collection of TrafficDemand lists - build_traffic_matrix_set: Construct TrafficMatrixSet from parsed YAML + TrafficDemand: Individual demand specification with source/target selectors + DemandSet: Named collection of TrafficDemand lists + build_demand_set: Construct DemandSet from parsed YAML """ -from ngraph.model.demand.builder import build_traffic_matrix_set -from ngraph.model.demand.matrix import TrafficMatrixSet +from ngraph.model.demand.builder import build_demand_set +from ngraph.model.demand.matrix import DemandSet from ngraph.model.demand.spec import TrafficDemand __all__ = [ "TrafficDemand", - "TrafficMatrixSet", - "build_traffic_matrix_set", + "DemandSet", + "build_demand_set", ] diff --git a/ngraph/model/demand/builder.py b/ngraph/model/demand/builder.py index 4e01645..0d75c0c 100644 --- a/ngraph/model/demand/builder.py +++ b/ngraph/model/demand/builder.py @@ -1,88 +1,95 @@ -"""Builders for traffic matrices. +"""Builders for demand sets. -Construct `TrafficMatrixSet` from raw dictionaries (e.g. parsed YAML). +Construct `DemandSet` from raw dictionaries (e.g. parsed YAML). """ from __future__ import annotations from typing import Any, Dict, List, Optional -from ngraph.model.demand.matrix import TrafficMatrixSet +from ngraph.dsl.expansion import ExpansionSpec, expand_block +from ngraph.model.demand.matrix import DemandSet from ngraph.model.demand.spec import TrafficDemand from ngraph.model.flow.policy_config import FlowPolicyPreset from ngraph.utils.yaml_utils import normalize_yaml_dict_keys -def build_traffic_matrix_set(raw: Dict[str, List[dict]]) -> TrafficMatrixSet: - """Build a `TrafficMatrixSet` from a mapping of name -> list of dicts. +def build_demand_set(raw: Dict[str, List[dict]]) -> DemandSet: + """Build a `DemandSet` from a mapping of name -> list of dicts. Args: - raw: Mapping where each key is a matrix name and each value is a list of + raw: Mapping where each key is a demand set name and each value is a list of dictionaries with `TrafficDemand` constructor fields. Returns: - Initialized `TrafficMatrixSet` with constructed `TrafficDemand` objects. + Initialized `DemandSet` with constructed `TrafficDemand` objects. Raises: ValueError: If ``raw`` is not a mapping of name -> list[dict], or if required fields are missing. 
""" if not isinstance(raw, dict): - raise ValueError( - "'traffic_matrix_set' must be a mapping of name -> list[TrafficDemand]" - ) + raise ValueError("'demands' must be a mapping of name -> list[TrafficDemand]") normalized_raw = normalize_yaml_dict_keys(raw) - tms = TrafficMatrixSet() + ds = DemandSet() for name, td_list in normalized_raw.items(): if not isinstance(td_list, list): raise ValueError( - f"Matrix '{name}' must map to a list of TrafficDemand dicts" + f"Demand set '{name}' must map to a list of TrafficDemand dicts" ) coerced: List[TrafficDemand] = [] for d in td_list: if not isinstance(d, dict): raise ValueError( - f"Entries in matrix '{name}' must be dicts, got {type(d).__name__}" + f"Entries in demand set '{name}' must be dicts, " + f"got {type(d).__name__}" ) - # Validate required fields - if "source" not in d or "sink" not in d: - raise ValueError( - f"Each demand in matrix '{name}' requires 'source' and 'sink' fields" - ) + # Handle expand block + expand_spec = ExpansionSpec.from_dict(d) + if expand_spec and not expand_spec.is_empty(): + for expanded in expand_block(d, expand_spec): + coerced.append(_build_demand(expanded, name)) + else: + coerced.append(_build_demand(d, name)) - # Build normalized dict for TrafficDemand constructor - td_kwargs: Dict[str, Any] = { - "source": d["source"], - "sink": d["sink"], - "demand": d.get("demand", 0.0), - "priority": d.get("priority", 0), - "mode": d.get("mode", "combine"), - "group_mode": d.get("group_mode", "flatten"), - "expand_vars": d.get("expand_vars", {}), - "expansion_mode": d.get("expansion_mode", "cartesian"), - "attrs": d.get("attrs", {}), - } - - # Optional id - if "id" in d: - td_kwargs["id"] = d["id"] - - # Coerce flow_policy_config into FlowPolicyPreset enum when provided - if "flow_policy_config" in d: - td_kwargs["flow_policy_config"] = _coerce_flow_policy_config( - d["flow_policy_config"] - ) + ds.add(name, coerced) + + return ds + + +def _build_demand(d: Dict[str, Any], set_name: str) -> TrafficDemand: + """Build a single TrafficDemand from a dict.""" + # Validate required fields + if "source" not in d or "target" not in d: + raise ValueError( + f"Each demand in set '{set_name}' requires 'source' and 'target' fields" + ) + + # Build normalized dict for TrafficDemand constructor + td_kwargs: Dict[str, Any] = { + "source": d["source"], + "target": d["target"], + "volume": d.get("volume", 0.0), + "priority": d.get("priority", 0), + "mode": d.get("mode", "combine"), + "group_mode": d.get("group_mode", "flatten"), + "attrs": d.get("attrs", {}), + } - coerced.append(TrafficDemand(**td_kwargs)) + # Optional id + if "id" in d: + td_kwargs["id"] = d["id"] - tms.add(name, coerced) + # Coerce flow_policy into FlowPolicyPreset enum when provided + if "flow_policy" in d: + td_kwargs["flow_policy"] = _coerce_flow_policy(d["flow_policy"]) - return tms + return TrafficDemand(**td_kwargs) -def _coerce_flow_policy_config(value: Any) -> Optional[FlowPolicyPreset]: +def _coerce_flow_policy(value: Any) -> Optional[FlowPolicyPreset]: """Return a FlowPolicyPreset from various user-friendly forms. 
 
     Accepts:
@@ -102,7 +109,7 @@ def _coerce_flow_policy(value: Any) -> Optional[FlowPolicyPreset]:
         try:
             return FlowPolicyPreset(value)
         except Exception as exc:  # pragma: no cover - validated by enum
-            raise ValueError(f"Unknown flow policy config value: {value}") from exc
+            raise ValueError(f"Unknown flow policy value: {value}") from exc
     if isinstance(value, str):
         s = value.strip()
         if not s:
@@ -112,12 +119,12 @@
             try:
                 return FlowPolicyPreset(int(s))
             except Exception as exc:
-                raise ValueError(f"Unknown flow policy config value: {s}") from exc
+                raise ValueError(f"Unknown flow policy value: {s}") from exc
         # Enum name lookup (case-insensitive)
         try:
             return FlowPolicyPreset[s.upper()]
         except KeyError as exc:
-            raise ValueError(f"Unknown flow policy config: {value}") from exc
+            raise ValueError(f"Unknown flow policy: {value}") from exc
 
     # Preserve other structural forms (e.g., dict) for callers that support them
     return value  # type: ignore[return-value]
diff --git a/ngraph/model/demand/matrix.py b/ngraph/model/demand/matrix.py
index 025db0a..b6c7558 100644
--- a/ngraph/model/demand/matrix.py
+++ b/ngraph/model/demand/matrix.py
@@ -1,6 +1,6 @@
-"""Traffic matrix containers.
+"""Demand set containers.
 
-Provides `TrafficMatrixSet`, a named collection of `TrafficDemand` lists
+Provides `DemandSet`, a named collection of `TrafficDemand` lists
 used as input to demand expansion and placement. This module contains
 input containers, not analysis results.
 """
@@ -14,77 +14,77 @@
 
 
 @dataclass
-class TrafficMatrixSet:
+class DemandSet:
     """Named collection of TrafficDemand lists.
 
-    This mutable container maps scenario names to lists of TrafficDemand objects,
-    allowing management of multiple traffic matrices for analysis.
+    This mutable container maps set names to lists of TrafficDemand objects,
+    allowing management of multiple demand sets for analysis.
 
     Attributes:
-        matrices: Dictionary mapping scenario names to TrafficDemand lists.
+        sets: Dictionary mapping set names to TrafficDemand lists.
     """
 
-    matrices: dict[str, list[TrafficDemand]] = field(default_factory=dict)
+    sets: dict[str, list[TrafficDemand]] = field(default_factory=dict)
 
     def add(self, name: str, demands: list[TrafficDemand]) -> None:
-        """Add a traffic matrix to the collection.
+        """Add a demand list to the collection.
 
         Args:
-            name: Scenario name identifier.
-            demands: List of TrafficDemand objects for this scenario.
+            name: Set name identifier.
+            demands: List of TrafficDemand objects for this set.
         """
-        self.matrices[name] = demands
+        self.sets[name] = demands
 
-    def get_matrix(self, name: str) -> list[TrafficDemand]:
-        """Get a specific traffic matrix by name.
+    def get_set(self, name: str) -> list[TrafficDemand]:
+        """Get a specific demand set by name.
 
         Args:
-            name: Name of the matrix to retrieve.
+            name: Name of the demand set to retrieve.
 
         Returns:
-            List of TrafficDemand objects for the named matrix.
+            List of TrafficDemand objects for the named set.
 
         Raises:
-            KeyError: If the matrix name doesn't exist.
+            KeyError: If the set name doesn't exist.
         """
-        return self.matrices[name]
+        return self.sets[name]
 
-    def get_default_matrix(self) -> list[TrafficDemand]:
-        """Get default traffic matrix.
+    def get_default_set(self) -> list[TrafficDemand]:
+        """Get default demand set.
 
-        Returns the matrix named 'default' if it exists. If there is exactly
-        one matrix, returns that single matrix. If there are no matrices,
If there are multiple matrices and none is + Returns the set named 'default' if it exists. If there is exactly + one set, returns that single set. If there are no sets, + returns an empty list. If there are multiple sets and none is named 'default', raises an error. Returns: - List of TrafficDemand objects for the default matrix. + List of TrafficDemand objects for the default set. Raises: - ValueError: If multiple matrices exist without a 'default' matrix. + ValueError: If multiple sets exist without a 'default' set. """ - if not self.matrices: + if not self.sets: return [] - if "default" in self.matrices: - return self.matrices["default"] + if "default" in self.sets: + return self.sets["default"] - if len(self.matrices) == 1: - return next(iter(self.matrices.values())) + if len(self.sets) == 1: + return next(iter(self.sets.values())) raise ValueError( - f"Multiple matrices exist ({list(self.matrices.keys())}) but no 'default' matrix. " - f"Please specify which matrix to use or add a 'default' matrix." + f"Multiple demand sets exist ({list(self.sets.keys())}) but no 'default' set. " + f"Please specify which set to use or add a 'default' set." ) def get_all_demands(self) -> list[TrafficDemand]: - """Get all traffic demands from all matrices combined. + """Get all traffic demands from all sets combined. Returns: - Flattened list of all TrafficDemand objects across all matrices. + Flattened list of all TrafficDemand objects across all sets. """ all_demands: list[TrafficDemand] = [] - for demands in self.matrices.values(): + for demands in self.sets.values(): all_demands.extend(demands) return all_demands @@ -92,9 +92,9 @@ def to_dict(self) -> dict[str, Any]: """Convert to dictionary for JSON serialization. Returns: - Dictionary mapping scenario names to lists of TrafficDemand dictionaries. + Dictionary mapping set names to lists of TrafficDemand dictionaries. """ return { name: [demand.__dict__ for demand in demands] - for name, demands in self.matrices.items() + for name, demands in self.sets.items() } diff --git a/ngraph/model/demand/spec.py b/ngraph/model/demand/spec.py index 0840b9d..0534910 100644 --- a/ngraph/model/demand/spec.py +++ b/ngraph/model/demand/spec.py @@ -6,7 +6,7 @@ """ from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union +from typing import TYPE_CHECKING, Any, Dict, Optional, Union from ngraph.model.flow.policy_config import FlowPolicyPreset from ngraph.utils.ids import new_base64_uuid @@ -25,39 +25,35 @@ class TrafficDemand: Attributes: source: Source node selector (string path or selector dict). - sink: Sink node selector (string path or selector dict). - demand: Total demand volume. - demand_placed: Portion of this demand placed so far. + target: Target node selector (string path or selector dict). + volume: Total demand volume. + volume_placed: Portion of this demand placed so far. priority: Priority class (lower = higher priority). mode: Node pairing mode ("combine" or "pairwise"). group_mode: How grouped nodes produce demands ("flatten", "per_group", "group_pairwise"). - expand_vars: Variable substitutions using $var syntax. - expansion_mode: How to combine expand_vars ("cartesian" or "zip"). - flow_policy_config: Policy preset for routing. - flow_policy: Concrete policy instance (overrides flow_policy_config). + flow_policy: Policy preset for routing. + flow_policy_obj: Concrete policy instance (overrides flow_policy). attrs: Arbitrary user metadata. id: Unique identifier. Auto-generated if empty. 
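A usage sketch of the renamed container, assuming `TrafficDemand` is importable from `ngraph.model.demand.spec` (the file this diff modifies next):

```python
from ngraph.model.demand.matrix import DemandSet
from ngraph.model.demand.spec import TrafficDemand

ds = DemandSet()
ds.add("peak", [TrafficDemand(source="A", target="B", volume=10.0)])
ds.add("offpeak", [TrafficDemand(source="A", target="B", volume=2.0)])

ds.get_set("peak")      # explicit lookup by name (was get_matrix)
ds.get_all_demands()    # flattened list across both sets

try:
    ds.get_default_set()
except ValueError:
    # Two sets and neither is named 'default': an explicit choice is required.
    pass
```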
""" source: Union[str, Dict[str, Any]] = "" - sink: Union[str, Dict[str, Any]] = "" - demand: float = 0.0 - demand_placed: float = 0.0 + target: Union[str, Dict[str, Any]] = "" + volume: float = 0.0 + volume_placed: float = 0.0 priority: int = 0 mode: str = "combine" group_mode: str = "flatten" - expand_vars: Dict[str, List[Any]] = field(default_factory=dict) - expansion_mode: str = "cartesian" - flow_policy_config: Optional[FlowPolicyPreset] = None - flow_policy: Optional["FlowPolicy"] = None # type: ignore[valid-type] + flow_policy: Optional[FlowPolicyPreset] = None + flow_policy_obj: Optional["FlowPolicy"] = None # type: ignore[valid-type] attrs: Dict[str, Any] = field(default_factory=dict) id: str = "" def __post_init__(self) -> None: """Generate id if not provided.""" if not self.id: - # Build a stable identifier from source/sink + # Build a stable identifier from source/target src_key = self.source if isinstance(self.source, str) else str(self.source) - sink_key = self.sink if isinstance(self.sink, str) else str(self.sink) - self.id = f"{src_key}|{sink_key}|{new_base64_uuid()}" + tgt_key = self.target if isinstance(self.target, str) else str(self.target) + self.id = f"{src_key}|{tgt_key}|{new_base64_uuid()}" diff --git a/ngraph/model/failure/__init__.py b/ngraph/model/failure/__init__.py index 4124b1e..d23cb00 100644 --- a/ngraph/model/failure/__init__.py +++ b/ngraph/model/failure/__init__.py @@ -16,7 +16,7 @@ from .generate import GenerateSpec, generate_risk_groups, parse_generate_spec from .membership import MembershipSpec, resolve_membership_rules -from .policy import FailureCondition, FailureMode, FailurePolicy, FailureRule +from .policy import FailureMode, FailurePolicy, FailureRule from .policy_set import FailurePolicySet from .validation import validate_risk_group_hierarchy, validate_risk_group_references @@ -25,7 +25,6 @@ "FailurePolicy", "FailureRule", "FailureMode", - "FailureCondition", "FailurePolicySet", # Generation "GenerateSpec", diff --git a/ngraph/model/failure/generate.py b/ngraph/model/failure/generate.py index bebab9c..692f339 100644 --- a/ngraph/model/failure/generate.py +++ b/ngraph/model/failure/generate.py @@ -6,9 +6,10 @@ from __future__ import annotations +import re from collections import defaultdict from dataclasses import dataclass, field -from typing import TYPE_CHECKING, Any, Dict, List, Literal +from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional from ngraph.dsl.selectors import ( flatten_link_attrs, @@ -29,16 +30,18 @@ class GenerateSpec: """Parsed generate block specification. Attributes: - entity_scope: Type of entities to group ("node" or "link"). + scope: Type of entities to group ("node" or "link"). + path: Optional regex pattern to filter entities by name. group_by: Attribute name to group by (supports dot-notation). - name_template: Template for generated group names. Use ${value} + name: Template for generated group names. Use ${value} as placeholder for the attribute value. attrs: Optional static attributes for generated groups. """ - entity_scope: Literal["node", "link"] + scope: Literal["node", "link"] group_by: str - name_template: str + name: str + path: Optional[str] = None attrs: Dict[str, Any] = field(default_factory=dict) @@ -58,8 +61,10 @@ def generate_risk_groups(network: "Network", spec: GenerateSpec) -> List[RiskGro Note: This function modifies entity risk_groups sets in place. 
""" + path_pattern = re.compile(spec.path) if spec.path else None + # Collect entities and flatten function - if spec.entity_scope == "node": + if spec.scope == "node": entities = [ (node.name, node, flatten_node_attrs(node)) for node in network.nodes.values() @@ -70,6 +75,14 @@ def generate_risk_groups(network: "Network", spec: GenerateSpec) -> List[RiskGro for link_id, link in network.links.items() ] + # Apply path filter if specified + if path_pattern: + entities = [ + (eid, entity, attrs) + for eid, entity, attrs in entities + if path_pattern.match(eid) + ] + # Group by attribute value groups: Dict[Any, List] = defaultdict(list) for entity_id, entity, attrs in entities: @@ -81,7 +94,7 @@ def generate_risk_groups(network: "Network", spec: GenerateSpec) -> List[RiskGro result: List[RiskGroup] = [] for value, members in groups.items(): # Generate group name from template - name = spec.name_template.replace("${value}", str(value)) + name = spec.name.replace("${value}", str(value)) # Create risk group with specified attrs rg = RiskGroup(name=name, attrs=dict(spec.attrs)) @@ -95,7 +108,7 @@ def generate_risk_groups(network: "Network", spec: GenerateSpec) -> List[RiskGro _logger.debug( "Generated %d risk groups from %s.%s", len(result), - spec.entity_scope, + spec.scope, spec.group_by, ) @@ -114,28 +127,31 @@ def parse_generate_spec(raw: Dict[str, Any]) -> GenerateSpec: Raises: ValueError: If required fields are missing or invalid. """ - entity_scope = raw.get("entity_scope", "node") - if entity_scope not in ("node", "link"): - raise ValueError( - f"generate entity_scope must be 'node' or 'link', got '{entity_scope}'" - ) + scope = raw.get("scope") + if not scope: + raise ValueError("generate requires 'scope' field (node or link)") + if scope not in ("node", "link"): + raise ValueError(f"generate scope must be 'node' or 'link', got '{scope}'") + + path = raw.get("path") group_by = raw.get("group_by") if not group_by: raise ValueError("generate requires 'group_by' field") - name_template = raw.get("name_template") - if not name_template: - raise ValueError("generate requires 'name_template' field") + name = raw.get("name") + if not name: + raise ValueError("generate requires 'name' field") - if "${value}" not in name_template: - raise ValueError("generate name_template must contain '${value}' placeholder") + if "${value}" not in name: + raise ValueError("generate name must contain '${value}' placeholder") attrs = raw.get("attrs", {}) return GenerateSpec( - entity_scope=entity_scope, + scope=scope, group_by=group_by, - name_template=name_template, + name=name, + path=path, attrs=attrs, ) diff --git a/ngraph/model/failure/membership.py b/ngraph/model/failure/membership.py index 99f3d6e..2060edc 100644 --- a/ngraph/model/failure/membership.py +++ b/ngraph/model/failure/membership.py @@ -7,8 +7,9 @@ from __future__ import annotations +import re from dataclasses import dataclass -from typing import TYPE_CHECKING, Any, Dict, List, Union +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union from ngraph.dsl.selectors import ( EntityScope, @@ -32,21 +33,23 @@ class MembershipSpec: """Parsed membership rule specification. Attributes: - entity_scope: Type of entities to match ("node", "link", or "risk_group"). + scope: Type of entities to match ("node", "link", or "risk_group"). + path: Optional regex pattern to filter entities by name. match: Match specification with conditions. 
""" - entity_scope: EntityScope - match: MatchSpec + scope: EntityScope + path: Optional[str] = None + match: Optional[MatchSpec] = None def resolve_membership_rules(network: "Network") -> None: """Apply membership rules to populate entity risk_groups sets. For each risk group with a `_membership_raw` specification: - - If entity_scope is "node" or "link": adds the risk group name to each + - If scope is "node" or "link": adds the risk group name to each matched entity's risk_groups set. - - If entity_scope is "risk_group": adds matched risk groups as children + - If scope is "risk_group": adds matched risk groups as children of this risk group (hierarchical membership). Args: @@ -68,9 +71,9 @@ def resolve_membership_rules(network: "Network") -> None: ) from e matched_count = 0 - if spec.entity_scope == "risk_group": + if spec.scope == "risk_group": # Hierarchical: add matched groups as children - matched_rgs = _select_risk_groups(network, spec.match) + matched_rgs = _select_risk_groups(network, spec) for matched_rg in matched_rgs: # Don't add self-reference if matched_rg.name != rg_name: @@ -88,7 +91,7 @@ def resolve_membership_rules(network: "Network") -> None: _logger.debug( "Resolved membership for '%s': scope=%s, matched=%d", rg_name, - spec.entity_scope, + spec.scope, matched_count, ) @@ -105,80 +108,136 @@ def _parse_membership_spec(raw: Dict[str, Any]) -> MembershipSpec: Raises: ValueError: If required fields are missing or invalid. """ - entity_scope = raw.get("entity_scope", "node") - if entity_scope not in ("node", "link", "risk_group"): + scope = raw.get("scope") + if not scope: raise ValueError( - f"entity_scope must be 'node', 'link', or 'risk_group', got '{entity_scope}'" + "membership requires 'scope' field (node, link, or risk_group)" + ) + if scope not in ("node", "link", "risk_group"): + raise ValueError( + f"scope must be 'node', 'link', or 'risk_group', got '{scope}'" ) + path = raw.get("path") match_raw = raw.get("match") - if match_raw is None: - raise ValueError("membership requires a 'match' block") - # Use unified parser with membership-specific defaults - match_spec = parse_match_spec( - match_raw, - default_logic="and", - require_conditions=True, - context="membership rule", - ) + # At least one of path or match must be specified + if path is None and match_raw is None: + raise ValueError("membership requires at least 'path' or 'match'") + + match_spec = None + if match_raw is not None: + # Use unified parser with membership-specific defaults + match_spec = parse_match_spec( + match_raw, + default_logic="and", + require_conditions=True, + context="membership rule", + ) - return MembershipSpec(entity_scope=entity_scope, match=match_spec) + return MembershipSpec(scope=scope, path=path, match=match_spec) def _select_entities( network: "Network", spec: MembershipSpec ) -> List[Union["Node", "Link"]]: - """Select nodes or links based on match conditions. + """Select nodes or links based on path and/or match conditions. Uses the shared match_entity_ids() function from selectors. Args: network: Network to search. - spec: Membership specification with entity_scope and match. + spec: Membership specification with scope, path, and match. Returns: List of matched Node or Link objects. 
""" - if spec.entity_scope == "node": + path_pattern = re.compile(spec.path) if spec.path else None + + if spec.scope == "node": # Build flattened attrs dict for all nodes entity_attrs = { node.name: flatten_node_attrs(node) for node in network.nodes.values() } - matched_ids = match_entity_ids( - entity_attrs, spec.match.conditions, spec.match.logic - ) + # Start with all or path-filtered IDs + if path_pattern: + candidate_ids = {eid for eid in entity_attrs if path_pattern.match(eid)} + else: + candidate_ids = set(entity_attrs.keys()) + + # Apply match conditions if specified + if spec.match: + filtered_attrs = { + k: v for k, v in entity_attrs.items() if k in candidate_ids + } + matched_ids = match_entity_ids( + filtered_attrs, spec.match.conditions, spec.match.logic + ) + else: + matched_ids = candidate_ids + return [network.nodes[node_id] for node_id in matched_ids] - elif spec.entity_scope == "link": + elif spec.scope == "link": # Build flattened attrs dict for all links entity_attrs = { link_id: flatten_link_attrs(link, link_id) for link_id, link in network.links.items() } - matched_ids = match_entity_ids( - entity_attrs, spec.match.conditions, spec.match.logic - ) + # Start with all or path-filtered IDs + if path_pattern: + candidate_ids = {eid for eid in entity_attrs if path_pattern.match(eid)} + else: + candidate_ids = set(entity_attrs.keys()) + + # Apply match conditions if specified + if spec.match: + filtered_attrs = { + k: v for k, v in entity_attrs.items() if k in candidate_ids + } + matched_ids = match_entity_ids( + filtered_attrs, spec.match.conditions, spec.match.logic + ) + else: + matched_ids = candidate_ids + return [network.links[link_id] for link_id in matched_ids] return [] -def _select_risk_groups(network: "Network", match: MatchSpec) -> List["RiskGroup"]: - """Select risk groups based on match conditions. +def _select_risk_groups(network: "Network", spec: MembershipSpec) -> List["RiskGroup"]: + """Select risk groups based on path and/or match conditions. Uses the shared match_entity_ids() function from selectors. Args: network: Network with risk_groups. - match: Match specification with conditions. + spec: Membership specification with path and match. Returns: List of matched RiskGroup objects. 
""" + path_pattern = re.compile(spec.path) if spec.path else None + # Build flattened attrs dict for all risk groups entity_attrs = { rg.name: flatten_risk_group_attrs(rg) for rg in network.risk_groups.values() } - matched_ids = match_entity_ids(entity_attrs, match.conditions, match.logic) + + # Start with all or path-filtered IDs + if path_pattern: + candidate_ids = {eid for eid in entity_attrs if path_pattern.match(eid)} + else: + candidate_ids = set(entity_attrs.keys()) + + # Apply match conditions if specified + if spec.match: + filtered_attrs = {k: v for k, v in entity_attrs.items() if k in candidate_ids} + matched_ids = match_entity_ids( + filtered_attrs, spec.match.conditions, spec.match.logic + ) + else: + matched_ids = candidate_ids + return [network.risk_groups[rg_name] for rg_name in matched_ids] diff --git a/ngraph/model/failure/parser.py b/ngraph/model/failure/parser.py index acad19e..0498ff2 100644 --- a/ngraph/model/failure/parser.py +++ b/ngraph/model/failure/parser.py @@ -4,9 +4,9 @@ from typing import Any, Callable, Dict, List, Optional +from ngraph.dsl.selectors import Condition from ngraph.logging import get_logger from ngraph.model.failure.policy import ( - FailureCondition, FailureMode, FailurePolicy, FailureRule, @@ -106,37 +106,65 @@ def build_failure_policy( policy_name: str, derive_seed: Callable[[str], Optional[int]], ) -> FailurePolicy: + """Build a FailurePolicy from a raw configuration dictionary. + + Parses modes, rules, and conditions from the policy definition and + constructs a fully initialized FailurePolicy object. + + Args: + fp_data: Policy definition dict with keys: modes (required), attrs, + expand_groups, expand_children. Each mode contains weight and rules. + policy_name: Name identifier for this policy (used for seed derivation). + derive_seed: Callable to derive deterministic seeds from component names. + + Returns: + FailurePolicy: Configured policy with parsed modes and rules. + + Raises: + ValueError: If modes is empty or malformed, or if rules are invalid. 
+ """ + def build_rules(rule_dicts: List[Dict[str, Any]]) -> List[FailureRule]: out: List[FailureRule] = [] for rule_dict in rule_dicts: - entity_scope = rule_dict.get("entity_scope", "node") - conditions_data = rule_dict.get("conditions", []) + scope = rule_dict.get("scope") + if not scope: + raise ValueError( + "failure rule requires 'scope' field (node, link, or risk_group)" + ) + + # Get conditions from match block + match_block = rule_dict.get("match", {}) + conditions_data = match_block.get("conditions", []) + logic = match_block.get("logic", "or") + if not isinstance(conditions_data, list): raise ValueError("Each rule's 'conditions' must be a list if present.") - conditions: List[FailureCondition] = [] + conditions: List[Condition] = [] for cond_dict in conditions_data: conditions.append( - FailureCondition( + Condition( attr=cond_dict["attr"], - operator=cond_dict["operator"], - value=cond_dict["value"], + op=cond_dict["op"], + value=cond_dict.get("value"), ) ) out.append( FailureRule( - entity_scope=entity_scope, + scope=scope, conditions=conditions, - logic=rule_dict.get("logic", "or"), - rule_type=rule_dict.get("rule_type", "all"), + logic=logic, + mode=rule_dict.get("mode", "all"), probability=rule_dict.get("probability", 1.0), count=rule_dict.get("count", 1), weight_by=rule_dict.get("weight_by"), + path=rule_dict.get("path"), ) ) return out - fail_srg = fp_data.get("fail_risk_groups", False) - fail_rg_children = fp_data.get("fail_risk_group_children", False) + expand_groups = fp_data.get("expand_groups", False) + expand_children = fp_data.get("expand_children", False) attrs = normalize_yaml_dict_keys(fp_data.get("attrs", {})) modes: List[FailureMode] = [] @@ -158,8 +186,8 @@ def build_rules(rule_dicts: List[Dict[str, Any]]) -> List[FailureRule]: return FailurePolicy( attrs=attrs, - fail_risk_groups=fail_srg, - fail_risk_group_children=fail_rg_children, + expand_groups=expand_groups, + expand_children=expand_children, seed=policy_seed, modes=modes, ) diff --git a/ngraph/model/failure/policy.py b/ngraph/model/failure/policy.py index 7476e01..9805d89 100644 --- a/ngraph/model/failure/policy.py +++ b/ngraph/model/failure/policy.py @@ -1,11 +1,11 @@ """Failure policy primitives. -Defines `FailureCondition`, `FailureRule`, and `FailurePolicy` for expressing -how nodes, links, and risk groups fail in analyses. Conditions match on -top-level attributes with simple operators; rules select matches using -"all", probabilistic "random" (with `probability`), or fixed-size "choice" -(with `count`). Policies can optionally expand failures by shared risk groups -or by risk-group children. +Defines `FailureRule` and `FailurePolicy` for expressing how nodes, links, +and risk groups fail in analyses. Conditions match on top-level attributes +with simple operators; rules select matches using "all", probabilistic +"random" (with `probability`), or fixed-size "choice" (with `count`). +Policies can optionally expand failures by shared risk groups or by +risk-group children. """ from __future__ import annotations @@ -17,51 +17,41 @@ from ngraph.dsl.selectors import Condition, EntityScope, match_entity_ids -# Alias for clarity in failure policy context -FailureCondition = Condition - @dataclass class FailureRule: """Defines how to match and then select entities for failure. Attributes: - entity_scope (EntityScope): - The type of entities this rule applies to: "node", "link", or "risk_group". - conditions (List[FailureCondition]): - A list of conditions to filter matching entities. 
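A sketch of the policy dict shape `build_failure_policy` now consumes: conditions move under each rule's `match` block, and `scope`/`mode` replace `entity_scope`/`rule_type`. The policy name and attribute values are illustrative:

```python
from ngraph.model.failure.parser import build_failure_policy

fp_data = {
    "expand_groups": True,  # renamed from 'fail_risk_groups'
    "modes": [
        {
            "weight": 1.0,
            "rules": [
                {
                    "scope": "link",
                    "match": {
                        "logic": "and",
                        "conditions": [
                            {"attr": "region", "op": "==", "value": "southwest"}
                        ],
                    },
                    "mode": "random",
                    "probability": 0.4,
                }
            ],
        }
    ],
}

policy = build_failure_policy(
    fp_data, policy_name="regional", derive_seed=lambda name: None
)
assert policy.expand_groups
assert policy.modes[0].rules[0].mode == "random"
```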
- logic (Literal["and", "or"]): - "and": All conditions must be true for a match. - "or": At least one condition is true for a match (default). - rule_type (Literal["random", "choice", "all"]): - The selection strategy among the matched set: - - "random": each matched entity is chosen with probability = `probability`. - - "choice": pick exactly `count` items from the matched set (random sample). - - "all": select every matched entity in the matched set. - probability (float): - Probability in [0,1], used if `rule_type="random"`. - count (int): - Number of entities to pick if `rule_type="choice"`. + scope: The type of entities this rule applies to: "node", "link", + or "risk_group". + conditions: A list of conditions to filter matching entities. + logic: "and" (all must be true) or "or" (any must be true, default). + mode: The selection strategy among the matched set: + - "random": each matched entity is chosen with probability. + - "choice": pick exactly `count` items (random sample). + - "all": select every matched entity. + probability: Probability in [0,1], used if mode="random". + count: Number of entities to pick if mode="choice". + weight_by: Optional attribute for weighted sampling in choice mode. + path: Optional regex pattern to filter entities by name. """ - entity_scope: EntityScope - conditions: List[FailureCondition] = field(default_factory=list) + scope: EntityScope + conditions: List[Condition] = field(default_factory=list) logic: Literal["and", "or"] = "or" - rule_type: Literal["random", "choice", "all"] = "all" + mode: Literal["random", "choice", "all"] = "all" probability: float = 1.0 count: int = 1 - # Optional attribute for weighted sampling in choice mode - # When set and rule_type=="choice", items are sampled without replacement - # with probability proportional to the non-negative numeric value of this attribute. - # If all weights are non-positive or missing, fallback to uniform sampling. weight_by: Optional[str] = None + path: Optional[str] = None def __post_init__(self) -> None: - if self.rule_type == "random": + if self.mode == "random": if not (0.0 <= self.probability <= 1.0): raise ValueError( f"probability={self.probability} must be within [0,1] " - f"for rule_type='random'." + f"for mode='random'." ) @@ -87,71 +77,29 @@ class FailureMode: @dataclass class FailurePolicy: - """A container for multiple FailureRules plus optional metadata in `attrs`. + """A container for failure modes plus optional metadata in `attrs`. The main entry point is `apply_failures`, which: - 1) For each rule, gather the relevant entities (node, link, or risk_group). - 2) Match them based on rule conditions using 'and' or 'or' logic. - 3) Apply the selection strategy (all, random, or choice). - 4) Collect the union of all failed entities across all rules. - 5) Optionally expand failures by shared-risk groups or sub-risks. 
- - Example YAML configuration: - ```yaml - failure_policy: - attrs: - description: "Regional power grid failure affecting telecom infrastructure" - fail_risk_groups: true - rules: - # Fail all nodes in Texas electrical grid - - entity_scope: "node" - conditions: - - attr: "electric_grid" - operator: "==" - value: "texas" - logic: "and" - rule_type: "all" - - # Randomly fail 40% of underground fiber links in affected region - - entity_scope: "link" - conditions: - - attr: "region" - operator: "==" - value: "southwest" - - attr: "installation" - operator: "==" - value: "underground" - logic: "and" - rule_type: "random" - probability: 0.4 - - # Choose exactly 2 risk groups to fail (e.g., data centers) - # Note: logic defaults to "or" when not specified - - entity_scope: "risk_group" - rule_type: "choice" - count: 2 - ``` + 1) Select a mode based on weights. + 2) For each rule in the mode, gather relevant entities. + 3) Match based on rule conditions using 'and' or 'or' logic. + 4) Apply the selection strategy (all, random, or choice). + 5) Collect the union of all failed entities across all rules. + 6) Optionally expand failures by shared-risk groups or sub-risks. Attributes: - rules (List[FailureRule]): - A list of FailureRules to apply. - attrs (Dict[str, Any]): - Arbitrary metadata about this policy (e.g. "name", "description"). - fail_risk_groups (bool): - If True, after initial selection, expand failures among any - node/link that shares a risk group with a failed entity. - fail_risk_group_children (bool): - If True, and if a risk_group is marked as failed, expand to - children risk_groups recursively. - seed (Optional[int]): - Seed for reproducible random operations. If None, operations - will be non-deterministic. - + attrs: Arbitrary metadata about this policy. + expand_groups: If True, expand failures among entities sharing + risk groups with failed entities. + expand_children: If True, expand failed risk groups to include + their children recursively. + seed: Seed for reproducible random operations. + modes: List of weighted failure modes. 
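The old docstring carried a YAML example that this hunk removes without replacement; a hedged equivalent under the new names, via direct dataclass construction (`FailureMode(weight=..., rules=...)` is assumed from its use in the snapshot helpers later in this diff):

```python
from ngraph.dsl.selectors import Condition
from ngraph.model.failure.policy import FailureMode, FailurePolicy, FailureRule

# Fail every node on the Texas grid.
grid_down = FailureRule(
    scope="node",
    conditions=[Condition(attr="electric_grid", op="==", value="texas")],
    logic="and",
    mode="all",
)

# Randomly fail 40% of underground links in the affected region.
fiber_cuts = FailureRule(
    scope="link",
    conditions=[
        Condition(attr="region", op="==", value="southwest"),
        Condition(attr="installation", op="==", value="underground"),
    ],
    logic="and",
    mode="random",
    probability=0.4,
)

policy = FailurePolicy(
    attrs={"description": "Regional power grid failure"},
    expand_groups=True,  # renamed from 'fail_risk_groups'
    modes=[FailureMode(weight=1.0, rules=[grid_down, fiber_cuts])],
)
```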
""" attrs: Dict[str, Any] = field(default_factory=dict) - fail_risk_groups: bool = False - fail_risk_group_children: bool = False + expand_groups: bool = False + expand_children: bool = False seed: Optional[int] = None modes: List[FailureMode] = field(default_factory=list) @@ -223,12 +171,8 @@ def apply_failures( rule, effective_seed, network_nodes - if rule.entity_scope == "node" - else ( - network_links - if rule.entity_scope == "link" - else network_risk_groups - ), + if rule.scope == "node" + else (network_links if rule.scope == "link" else network_risk_groups), ) # Record selection in trace if non-empty @@ -236,19 +180,19 @@ def apply_failures( failure_trace["selections"].append( { "rule_index": idx, - "entity_scope": rule.entity_scope, - "rule_type": rule.rule_type, + "scope": rule.scope, + "mode": rule.mode, "matched_count": len(matched_ids), "selected_ids": sorted(selected), } ) # Add them to the respective fail sets - if rule.entity_scope == "node": + if rule.scope == "node": failed_nodes |= set(selected) - elif rule.entity_scope == "link": + elif rule.scope == "link": failed_links |= set(selected) - elif rule.entity_scope == "risk_group": + elif rule.scope == "risk_group": failed_risk_groups |= set(selected) # Snapshot before expansion for trace @@ -261,13 +205,13 @@ def apply_failures( pre_rgs = set(failed_risk_groups) # Optionally expand by risk groups - if self.fail_risk_groups: + if self.expand_groups: self._expand_risk_groups( failed_nodes, failed_links, network_nodes, network_links ) # Optionally expand failed risk-group children - if self.fail_risk_group_children and failed_risk_groups: + if self.expand_children and failed_risk_groups: self._expand_failed_risk_group_children( failed_risk_groups, network_risk_groups ) @@ -285,7 +229,7 @@ def apply_failures( def _match_scope( self, - rule_idx: int, + _rule_idx: int, rule: FailureRule, network_nodes: Dict[str, Any], network_links: Dict[str, Any], @@ -294,14 +238,26 @@ def _match_scope( """Get the set of IDs matched by the given rule. Uses the shared match_entity_ids() function from selectors. + Applies optional path filter if specified. """ + import re + # Decide which mapping to iterate - if rule.entity_scope == "node": - return match_entity_ids(network_nodes, rule.conditions, rule.logic) - elif rule.entity_scope == "link": - return match_entity_ids(network_links, rule.conditions, rule.logic) + if rule.scope == "node": + candidates = match_entity_ids(network_nodes, rule.conditions, rule.logic) + elif rule.scope == "link": + candidates = match_entity_ids(network_links, rule.conditions, rule.logic) else: # risk_group - return match_entity_ids(network_risk_groups, rule.conditions, rule.logic) + candidates = match_entity_ids( + network_risk_groups, rule.conditions, rule.logic + ) + + # Apply path filter if specified + if rule.path: + pattern = re.compile(rule.path) + candidates = {eid for eid in candidates if pattern.match(eid)} + + return candidates @staticmethod def _select_entities( @@ -312,7 +268,7 @@ def _select_entities( ) -> Set[str]: """Select entities for failure per rule. - For rule_type="choice" and rule.weight_by set, perform weighted sampling + For mode="choice" and rule.weight_by set, perform weighted sampling without replacement according to the specified attribute. If all weights are non-positive or missing, fallback to uniform sampling. """ @@ -324,10 +280,10 @@ def _select_entities( # intentionally non-deterministic across processes (hash randomization). 
ordered_ids = sorted(entity_ids) - if rule.rule_type == "random": + if rule.mode == "random": rng = _random.Random(seed) if seed is not None else _random return {eid for eid in ordered_ids if rng.random() < rule.probability} - elif rule.rule_type == "choice": + elif rule.mode == "choice": count = min(rule.count, len(entity_ids)) if count <= 0: return set() @@ -370,10 +326,10 @@ def _select_entities( entity_list = ordered_ids rng = _random.Random(seed) if seed is not None else _random return set(rng.sample(entity_list, k=count)) - elif rule.rule_type == "all": + elif rule.mode == "all": return entity_ids else: - raise ValueError(f"Unsupported rule_type: {rule.rule_type}") + raise ValueError(f"Unsupported mode: {rule.mode}") @staticmethod def _extract_weight(entity: Any, attr_name: str) -> float: @@ -558,8 +514,8 @@ def to_dict(self) -> Dict[str, Any]: """ data: Dict[str, Any] = { "attrs": self.attrs, - "fail_risk_groups": self.fail_risk_groups, - "fail_risk_group_children": self.fail_risk_group_children, + "expand_groups": self.expand_groups, + "expand_children": self.expand_children, "seed": self.seed, } if self.modes: @@ -568,20 +524,21 @@ def to_dict(self) -> Dict[str, Any]: "weight": mode.weight, "rules": [ { - "entity_scope": rule.entity_scope, + "scope": rule.scope, "conditions": [ { "attr": cond.attr, - "operator": cond.operator, + "op": cond.op, "value": cond.value, } for cond in rule.conditions ], "logic": rule.logic, - "rule_type": rule.rule_type, + "mode": rule.mode, "probability": rule.probability, "count": rule.count, **({"weight_by": rule.weight_by} if rule.weight_by else {}), + **({"path": rule.path} if rule.path else {}), } for rule in mode.rules ], diff --git a/ngraph/model/failure/validation.py b/ngraph/model/failure/validation.py index 91cf98c..edfc1c7 100644 --- a/ngraph/model/failure/validation.py +++ b/ngraph/model/failure/validation.py @@ -60,7 +60,7 @@ def validate_risk_group_hierarchy(network: "Network") -> None: Uses DFS-based cycle detection to find any risk group that is part of a cycle in the children hierarchy. This can happen when membership rules - with entity_scope='risk_group' create mutual parent-child relationships. + with scope='risk_group' create mutual parent-child relationships. Args: network: Network with risk_groups populated (after membership resolution). @@ -113,7 +113,7 @@ def dfs(node: str) -> List[str]: f"Circular reference detected in risk group hierarchy:\n" f" {cycle_str}\n\n" f"Risk groups cannot form cycles in their parent-child relationships. " - f"This may be caused by membership rules with entity_scope='risk_group' " + f"This may be caused by membership rules with scope='risk_group' " f"that create mutual parent-child relationships. Review the membership " f"rules for these groups and adjust conditions to break the cycle." ) diff --git a/ngraph/model/flow/policy_config.py b/ngraph/model/flow/policy_config.py index 5d0c820..036b021 100644 --- a/ngraph/model/flow/policy_config.py +++ b/ngraph/model/flow/policy_config.py @@ -197,7 +197,7 @@ def create_flow_policy( def serialize_policy_preset(cfg: Any) -> Optional[str]: """Serialize a FlowPolicyPreset to its string name for JSON storage. - Handles FlowPolicyPreset enum values, integer enum values, and string fallbacks. + Handles FlowPolicyPreset enum values, integer enum values, and string inputs. Returns None for None input. 
Args: diff --git a/ngraph/model/path.py b/ngraph/model/path.py index 1ad9bef..b5c69ed 100644 --- a/ngraph/model/path.py +++ b/ngraph/model/path.py @@ -4,31 +4,22 @@ cost. Cached properties expose derived sequences for nodes and edges, and helpers provide equality, ordering by cost, and sub-path extraction with cost recalculation. - -Breaking change from v1.x: Edge references now use EdgeRef (link_id + direction) -instead of integer edge keys for stable scenario-level edge identification. """ from __future__ import annotations from dataclasses import dataclass, field from functools import cached_property -from typing import TYPE_CHECKING, Any, Iterator, Set, Tuple +from typing import Any, Iterator, Set, Tuple from ngraph.types.base import Cost from ngraph.types.dto import EdgeRef -if TYPE_CHECKING: - from netgraph_core import StrictMultiDiGraph - @dataclass class Path: """Represents a single path in the network. - Breaking change from v1.x: path field now uses EdgeRef (link_id + direction) - instead of integer edge keys for stable scenario-level edge identification. - Attributes: path: Sequence of (node_name, (edge_refs...)) tuples representing the path. The final element typically has an empty tuple of edge refs. @@ -154,27 +145,14 @@ def nodes_seq(self) -> Tuple[str, ...]: """ return tuple(node for node, _ in self.path) - def get_sub_path( - self, - dst_node: str, - graph: StrictMultiDiGraph | None = None, - cost_attr: str = "cost", - ) -> Path: + def get_sub_path(self, dst_node: str) -> Path: """Create a sub-path ending at the specified destination node. The sub-path is formed by truncating the original path at the first occurrence of `dst_node` and ensuring that the final element has an empty tuple of edges. - Note: With EdgeRef-based paths, cost recalculation requires graph lookup. - The graph and cost_attr parameters are accepted for interface compatibility - but not currently used. Cost is set to infinity to explicitly indicate - recalculation is needed. Check for `math.isinf(sub_path.cost)` if you need - the actual cost. - Args: dst_node: The node at which to truncate the path. - graph: Graph for cost recalculation (currently unused). - cost_attr: Edge attribute for cost lookup (currently unused). Returns: A new Path instance representing the sub-path from the original source @@ -183,9 +161,6 @@ def get_sub_path( Raises: ValueError: If `dst_node` is not found in the current path. """ - # Suppress unused parameter warnings - accepted for interface compatibility - _ = graph, cost_attr - new_elements = [] found = False diff --git a/ngraph/profiling/profiler.py b/ngraph/profiling/profiler.py index 542711d..3896617 100644 --- a/ngraph/profiling/profiler.py +++ b/ngraph/profiling/profiler.py @@ -393,8 +393,11 @@ def save_detailed_profile( f"No detailed profile data available for step: {step_name}" ) else: - # Save combined profile data (if available) - logger.warning("Combined profile saving not yet implemented") + raise NotImplementedError( + "Combined profile saving requires a step_name argument. " + "To save all step profiles, iterate over step_profiles and call " + "save_detailed_profile for each step." + ) class PerformanceReporter: diff --git a/ngraph/results/snapshot.py b/ngraph/results/snapshot.py index 85ebfdf..3716c59 100644 --- a/ngraph/results/snapshot.py +++ b/ngraph/results/snapshot.py @@ -1,6 +1,6 @@ """Scenario snapshot helpers. 
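A sketch of the simplified `get_sub_path` (the `graph`/`cost_attr` parameters are gone). The `Path` constructor field names are assumed from the dataclass above; edge tuples are left empty to sidestep `EdgeRef` details:

```python
from ngraph.model.path import Path

# Three-hop path A -> B -> C with edge refs elided.
p = Path(path=(("A", ()), ("B", ()), ("C", ())), cost=2.0)

sub = p.get_sub_path("B")  # truncate at the first occurrence of 'B'
assert sub.nodes_seq == ("A", "B")
```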
-Build a concise dictionary snapshot of failure policies and traffic matrices for +Build a concise dictionary snapshot of failure policies and demand sets for export into results without keeping heavy domain objects. """ @@ -13,8 +13,22 @@ def build_scenario_snapshot( *, seed: int | None, failure_policy_set, - traffic_matrix_set, + demand_set, ) -> Dict[str, Any]: + """Build a concise dictionary snapshot of the scenario state. + + Creates a serializable representation of the scenario's failure policies + and demand sets, suitable for export into results without keeping heavy + domain objects. + + Args: + seed: Scenario-level seed for reproducibility, or None if unseeded. + failure_policy_set: FailurePolicySet containing named failure policies. + demand_set: DemandSet containing named demand collections. + + Returns: + Dict containing: seed, failures (policy snapshots), demands (demand snapshots). + """ snapshot_failure_policies: Dict[str, Any] = {} for name, policy in getattr(failure_policy_set, "policies", {}).items(): modes_list: list[dict[str, Any]] = [] @@ -27,15 +41,16 @@ def build_scenario_snapshot( for rule in getattr(mode, "rules", []) or []: mode_dict["rules"].append( { - "entity_scope": getattr(rule, "entity_scope", "node"), + "scope": getattr(rule, "scope", "node"), "logic": getattr(rule, "logic", "or"), - "rule_type": getattr(rule, "rule_type", "all"), + "mode": getattr(rule, "mode", "all"), "probability": float(getattr(rule, "probability", 1.0)), "count": int(getattr(rule, "count", 1)), + "path": getattr(rule, "path", None), "conditions": [ { "attr": c.attr, - "operator": c.operator, + "op": c.op, "value": c.value, } for c in getattr(rule, "conditions", []) or [] @@ -45,30 +60,32 @@ def build_scenario_snapshot( modes_list.append(mode_dict) snapshot_failure_policies[name] = { "attrs": dict(getattr(policy, "attrs", {}) or {}), + "expand_groups": getattr(policy, "expand_groups", False), + "expand_children": getattr(policy, "expand_children", False), "modes": modes_list, } - snapshot_tms: Dict[str, list[dict[str, Any]]] = {} - for mname, demands in getattr(traffic_matrix_set, "matrices", {}).items(): + snapshot_demands: Dict[str, list[dict[str, Any]]] = {} + for sname, demands in getattr(demand_set, "sets", {}).items(): entries: list[dict[str, Any]] = [] for d in demands: entries.append( { "id": getattr(d, "id", None), "source": getattr(d, "source", ""), - "sink": getattr(d, "sink", ""), - "demand": float(getattr(d, "demand", 0.0)), + "target": getattr(d, "target", ""), + "volume": float(getattr(d, "volume", 0.0)), "priority": int(getattr(d, "priority", 0)), "mode": getattr(d, "mode", "pairwise"), "group_mode": getattr(d, "group_mode", "flatten"), - "flow_policy_config": getattr(d, "flow_policy_config", None), + "flow_policy": getattr(d, "flow_policy", None), "attrs": dict(getattr(d, "attrs", {}) or {}), } ) - snapshot_tms[mname] = entries + snapshot_demands[sname] = entries return { "seed": seed, - "failure_policy_set": snapshot_failure_policies, - "traffic_matrices": snapshot_tms, + "failures": snapshot_failure_policies, + "demands": snapshot_demands, } diff --git a/ngraph/scenario.py b/ngraph/scenario.py index b1a3042..65e7aa9 100644 --- a/ngraph/scenario.py +++ b/ngraph/scenario.py @@ -9,8 +9,8 @@ from ngraph.dsl.loader import load_scenario_yaml from ngraph.logging import get_logger from ngraph.model.components import ComponentsLibrary -from ngraph.model.demand.builder import build_traffic_matrix_set -from ngraph.model.demand.matrix import TrafficMatrixSet +from 
ngraph.model.demand.builder import build_demand_set +from ngraph.model.demand.matrix import DemandSet from ngraph.model.failure.generate import generate_risk_groups, parse_generate_spec from ngraph.model.failure.membership import resolve_membership_rules from ngraph.model.failure.parser import build_failure_policy_set, build_risk_groups @@ -50,7 +50,7 @@ class Scenario: network: Network workflow: List[WorkflowStep] failure_policy_set: FailurePolicySet = field(default_factory=FailurePolicySet) - traffic_matrix_set: TrafficMatrixSet = field(default_factory=TrafficMatrixSet) + demand_set: DemandSet = field(default_factory=DemandSet) results: Results = field(default_factory=Results) components_library: ComponentsLibrary = field(default_factory=ComponentsLibrary) seed: Optional[int] = None @@ -92,12 +92,12 @@ def from_yaml( Top-level YAML keys can include: - vars: YAML anchors for value reuse - blueprints: Reusable topology templates - - network: Nodes, links, groups, adjacency + - components: Hardware component library + - network: Nodes, links, node_rules, link_rules - risk_groups: Failure correlation groups (direct, membership rules, generate blocks) - - failure_policy_set: Failure simulation policies - - traffic_matrix_set: Traffic demand definitions + - demands: Traffic demand definitions (named sets) + - failures: Failure simulation policies - workflow: Analysis execution steps - - components: Hardware component library - seed: Master seed for reproducible randomness Risk group processing: @@ -107,7 +107,7 @@ def from_yaml( 4. References are validated (undefined groups and circular hierarchies detected) If no 'workflow' key is provided, the scenario has no steps to run. - If 'failure_policy_set' is omitted, scenario.failure_policy_set is empty. + If 'failures' is omitted, scenario.failure_policy_set is empty. If 'components' is provided, it is merged with default_components. If 'seed' is provided, it enables reproducible random operations. If 'vars' is provided, it can contain YAML anchors and aliases for reuse. @@ -151,7 +151,7 @@ def from_yaml( # 2) Build the failure policy set seed_manager = SeedManager(seed) failure_policy_set = build_failure_policy_set( - data.get("failure_policy_set", {}), + data.get("failures", {}), derive_seed=lambda n: seed_manager.derive_seed("failure_policy", n), ) @@ -167,26 +167,26 @@ def from_yaml( except Exception as exc: Scenario._logger.debug("Failed to log policy set stats: %s", exc) - # 3) Build traffic matrix set - raw = data.get("traffic_matrix_set", {}) - tms = build_traffic_matrix_set(raw) + # 3) Build demand sets + raw = data.get("demands", {}) + ds = build_demand_set(raw) try: - matrix_names = sorted(list(getattr(tms, "matrices", {}).keys())) + set_names = sorted(list(getattr(ds, "sets", {}).keys())) total_demands = 0 - for _mname, demands in getattr(tms, "matrices", {}).items(): + for _sname, demands in getattr(ds, "sets", {}).items(): total_demands += len(demands) Scenario._logger.debug( - "Constructed TrafficMatrixSet: matrices=%d, total_demands=%d%s", - len(matrix_names), + "Constructed DemandSet: sets=%d, total_demands=%d%s", + len(set_names), total_demands, ( - f" ({', '.join(matrix_names[:5])}{'...' if len(matrix_names) > 5 else ''})" - if matrix_names + f" ({', '.join(set_names[:5])}{'...' 
if len(set_names) > 5 else ''})" + if set_names else "" ), ) except Exception as exc: - Scenario._logger.debug("Failed to log traffic matrix stats: %s", exc) + Scenario._logger.debug("Failed to log demand set stats: %s", exc) # 4) Build workflow steps workflow_data = data.get("workflow", []) @@ -257,9 +257,9 @@ def from_yaml( raise ValueError( f"Generated risk group '{rg.name}' conflicts with existing " f"risk group. The generate block with group_by='{spec.group_by}' " - f"and name_template='{spec.name_template}' produced a name that " + f"and name='{spec.name}' produced a name that " f"already exists. Either rename the existing group or adjust " - f"the name_template to avoid collisions." + f"the name to avoid collisions." ) network_obj.risk_groups[rg.name] = rg except ValueError as e: @@ -282,7 +282,7 @@ def from_yaml( network=network_obj, failure_policy_set=failure_policy_set, workflow=workflow_steps, - traffic_matrix_set=tms, + demand_set=ds, components_library=final_components, seed=seed, ) @@ -293,7 +293,7 @@ def from_yaml( build_scenario_snapshot( seed=seed, failure_policy_set=failure_policy_set, - traffic_matrix_set=tms, + demand_set=ds, ) ) except Exception as exc: @@ -306,7 +306,7 @@ def from_yaml( len(getattr(network_obj, "nodes", {})), len(getattr(network_obj, "links", {})), len(getattr(failure_policy_set, "policies", {})), - len(getattr(tms, "matrices", {})), + len(getattr(ds, "sets", {})), len(workflow_steps), ) except Exception as exc: diff --git a/ngraph/schemas/scenario.json b/ngraph/schemas/scenario.json index 7b750cd..85facef 100644 --- a/ngraph/schemas/scenario.json +++ b/ngraph/schemas/scenario.json @@ -13,7 +13,7 @@ "type": "string", "description": "Attribute name to evaluate (supports dot-notation for nested attributes, e.g., 'hardware.vendor')" }, - "operator": { + "op": { "type": "string", "enum": [ "==", @@ -26,18 +26,18 @@ "not_contains", "in", "not_in", - "any_value", - "no_value" + "exists", + "not_exists" ], "description": "Comparison operator" }, "value": { - "description": "Value to compare against (not required for any_value/no_value operators)" + "description": "Value to compare against (not required for exists/not_exists operators)" } }, "required": [ "attr", - "operator" + "op" ], "additionalProperties": false }, @@ -51,7 +51,7 @@ "and", "or" ], - "description": "How to combine conditions (defaults vary by context: 'or' for adjacency/demands, 'and' for membership rules)" + "description": "How to combine conditions (defaults vary by context: 'or' for links/demands, 'and' for membership rules)" }, "conditions": { "type": "array", @@ -97,6 +97,165 @@ "$ref": "#/$defs/nodeSelector" } ] + }, + "expandBlock": { + "type": "object", + "description": "Variable expansion block", + "properties": { + "vars": { + "type": "object", + "description": "Variable substitutions using ${var} syntax", + "additionalProperties": { + "type": "array", + "items": {} + } + }, + "mode": { + "type": "string", + "enum": [ + "cartesian", + "zip" + ], + "description": "How to combine variable lists (default: cartesian)" + } + }, + "additionalProperties": false + }, + "linkProperties": { + "type": "object", + "description": "Common link properties (flattened, no wrapper)", + "properties": { + "capacity": { + "type": "number", + "description": "Link capacity" + }, + "cost": { + "type": "number", + "description": "Link cost" + }, + "disabled": { + "type": "boolean", + "description": "Whether the link is disabled" + }, + "risk_groups": { + "type": "array", + "items": { + "type": "string" + 
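Pulling the key renames together, a hedged end-to-end sketch, assuming `Scenario.from_yaml` accepts YAML text; node and link names are illustrative:

```python
from ngraph.scenario import Scenario

yaml_text = """
network:
  nodes:
    A: {}
    B: {}
  links:
    - source: A
      target: B
      capacity: 100
demands:        # was 'traffic_matrix_set'
  default:
    - {source: A, target: B, volume: 10}
failures: {}    # was 'failure_policy_set'
"""

scenario = Scenario.from_yaml(yaml_text)
assert sorted(scenario.demand_set.sets) == ["default"]
```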
}, + "description": "Risk groups this link belongs to" + }, + "attrs": { + "type": "object", + "description": "Additional link attributes", + "additionalProperties": true + } + }, + "additionalProperties": false + }, + "nodeDefinition": { + "type": "object", + "description": "Node definition - either single node, counted group, blueprint reference, or nested nodes", + "properties": { + "blueprint": { + "type": "string", + "description": "Blueprint to instantiate" + }, + "params": { + "type": "object", + "description": "Parameters to pass to the blueprint" + }, + "count": { + "type": "integer", + "minimum": 1, + "description": "Number of nodes to create (distinguishes group from single node)" + }, + "template": { + "type": "string", + "description": "Name template using {n} placeholder for sequential numbering" + }, + "nodes": { + "type": "object", + "description": "Nested child nodes (inline hierarchy without blueprints)", + "additionalProperties": { + "$ref": "#/$defs/nodeDefinition" + } + }, + "attrs": { + "type": "object", + "description": "Node attributes" + }, + "disabled": { + "type": "boolean", + "description": "Whether the node is disabled" + }, + "risk_groups": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Risk groups this node belongs to" + } + }, + "additionalProperties": false + }, + "linkDefinition": { + "type": "object", + "description": "Link definition with flattened properties", + "properties": { + "source": { + "$ref": "#/$defs/selectorOrString", + "description": "Source node selector or name" + }, + "target": { + "$ref": "#/$defs/selectorOrString", + "description": "Target node selector or name" + }, + "pattern": { + "type": "string", + "enum": [ + "mesh", + "one_to_one" + ], + "description": "Connection pattern between source and target sets" + }, + "count": { + "type": "integer", + "minimum": 1, + "description": "Number of parallel links to create" + }, + "capacity": { + "type": "number", + "description": "Link capacity" + }, + "cost": { + "type": "number", + "description": "Link cost" + }, + "disabled": { + "type": "boolean", + "description": "Whether the link is disabled" + }, + "risk_groups": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Risk groups this link belongs to" + }, + "attrs": { + "type": "object", + "description": "Additional link attributes", + "additionalProperties": true + }, + "expand": { + "$ref": "#/$defs/expandBlock" + } + }, + "required": [ + "source", + "target" + ], + "additionalProperties": false } }, "properties": { @@ -129,257 +288,27 @@ }, "nodes": { "type": "object", - "description": "Node definitions", - "patternProperties": { - "^[a-zA-Z0-9_-]+$": { - "type": "object", - "properties": { - "attrs": { - "type": "object", - "description": "Node attributes. 
Supports 'hardware': {component, count} for node hardware component definition.", - "properties": { - "hardware": { - "type": "object", - "properties": { - "component": { - "type": "string" - }, - "count": { - "type": "number", - "minimum": 0 - } - }, - "additionalProperties": false - } - }, - "additionalProperties": true - }, - "disabled": { - "type": "boolean", - "description": "Whether the node is disabled" - }, - "risk_groups": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Risk groups this node belongs to" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false + "description": "Node definitions (single nodes or counted groups distinguished by 'count' field)", + "additionalProperties": { + "$ref": "#/$defs/nodeDefinition" + } }, "links": { "type": "array", "description": "Link definitions", "items": { - "type": "object", - "properties": { - "source": { - "type": "string", - "description": "Source node name" - }, - "target": { - "type": "string", - "description": "Target node name" - }, - "link_params": { - "type": "object", - "properties": { - "capacity": { - "type": "number", - "description": "Link capacity" - }, - "cost": { - "type": "number", - "description": "Link cost" - }, - "disabled": { - "type": "boolean", - "description": "Whether the link is disabled" - }, - "risk_groups": { - "type": "array", - "items": { - "type": "string" - }, - "description": "Risk groups this link belongs to" - }, - "attrs": { - "type": "object", - "description": "Additional link attributes. Supports per-end hardware under 'hardware': {source: {component, count, exclusive}, target: {component, count, exclusive}}", - "properties": { - "hardware": { - "type": "object", - "properties": { - "source": { - "type": "object", - "properties": { - "component": { - "type": "string" - }, - "count": { - "type": "number", - "minimum": 0 - }, - "exclusive": { - "type": "boolean" - } - }, - "additionalProperties": false - }, - "target": { - "type": "object", - "properties": { - "component": { - "type": "string" - }, - "count": { - "type": "number", - "minimum": 0 - }, - "exclusive": { - "type": "boolean" - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - } - }, - "additionalProperties": true - } - }, - "additionalProperties": false - }, - "link_count": { - "type": "integer", - "minimum": 1, - "description": "Number of parallel links to create" - } - }, - "required": [ - "source", - "target" - ], - "additionalProperties": false - } - }, - "groups": { - "type": "object", - "description": "Node group definitions for blueprint expansion. 
NOTE: Runtime validation enforces that groups with 'use_blueprint' can only have {use_blueprint, parameters, attrs, disabled, risk_groups}, while groups without 'use_blueprint' can only have {node_count, name_template, attrs, disabled, risk_groups}.", - "patternProperties": { - "^[a-zA-Z0-9_\\[\\]-]+$": { - "type": "object", - "properties": { - "use_blueprint": { - "type": "string" - }, - "parameters": { - "type": "object" - }, - "node_count": { - "type": "integer", - "minimum": 1 - }, - "name_template": { - "type": "string" - }, - "attrs": { - "type": "object" - }, - "disabled": { - "type": "boolean" - }, - "risk_groups": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "additionalProperties": false - } - } - }, - "adjacency": { - "type": "array", - "description": "Adjacency rules for blueprint expansion", - "items": { - "type": "object", - "properties": { - "source": { - "$ref": "#/$defs/selectorOrString" - }, - "target": { - "$ref": "#/$defs/selectorOrString" - }, - "pattern": { - "type": "string" - }, - "link_count": { - "type": "integer", - "minimum": 1 - }, - "link_params": { - "type": "object", - "properties": { - "capacity": { - "type": "number" - }, - "cost": { - "type": "number" - }, - "disabled": { - "type": "boolean" - }, - "risk_groups": { - "type": "array", - "items": { - "type": "string" - } - }, - "attrs": { - "type": "object" - } - }, - "additionalProperties": false - }, - "expand_vars": { - "type": "object", - "description": "Variable substitutions using $var or ${var} syntax", - "additionalProperties": { - "type": "array", - "items": {} - } - }, - "expansion_mode": { - "type": "string", - "enum": [ - "cartesian", - "zip" - ], - "description": "How to combine expand_vars lists" - } - }, - "required": [ - "source", - "target" - ], - "additionalProperties": false + "$ref": "#/$defs/linkDefinition" } }, - "node_overrides": { + "node_rules": { "type": "array", - "description": "Node override rules", + "description": "Node override rules (post-expansion)", "items": { "type": "object", "properties": { "path": { - "type": "string" + "type": "string", + "description": "Regex pattern on node.name" }, "match": { "$ref": "#/$defs/matchSpec" @@ -395,14 +324,17 @@ "items": { "type": "string" } + }, + "expand": { + "$ref": "#/$defs/expandBlock" } }, "additionalProperties": false } }, - "link_overrides": { + "link_rules": { "type": "array", - "description": "Link override rules", + "description": "Link override rules (post-expansion)", "items": { "type": "object", "properties": { @@ -412,32 +344,34 @@ "target": { "$ref": "#/$defs/selectorOrString" }, - "any_direction": { + "bidirectional": { + "type": "boolean", + "description": "Whether to match links in both directions" + }, + "link_match": { + "$ref": "#/$defs/matchSpec", + "description": "Filter by the link's own attributes" + }, + "capacity": { + "type": "number" + }, + "cost": { + "type": "number" + }, + "disabled": { "type": "boolean" }, - "link_params": { - "type": "object", - "properties": { - "capacity": { - "type": "number" - }, - "cost": { - "type": "number" - }, - "disabled": { - "type": "boolean" - }, - "risk_groups": { - "type": "array", - "items": { - "type": "string" - } - }, - "attrs": { - "type": "object" - } - }, - "additionalProperties": false + "risk_groups": { + "type": "array", + "items": { + "type": "string" + } + }, + "attrs": { + "type": "object" + }, + "expand": { + "$ref": "#/$defs/expandBlock" } }, "additionalProperties": false @@ -449,123 +383,26 @@ "blueprints": { "type": "object", 
"description": "Reusable network blueprint definitions", - "patternProperties": { - "^[a-zA-Z0-9_-]+$": { - "type": "object", - "properties": { - "groups": { - "type": "object", - "description": "Node group definitions for blueprint expansion.", - "patternProperties": { - "^[a-zA-Z0-9_\\[\\]-]+$": { - "type": "object", - "properties": { - "use_blueprint": { - "type": "string" - }, - "parameters": { - "type": "object" - }, - "node_count": { - "type": "integer", - "minimum": 1 - }, - "name_template": { - "type": "string" - }, - "attrs": { - "type": "object" - }, - "disabled": { - "type": "boolean" - }, - "risk_groups": { - "type": "array", - "items": { - "type": "string" - } - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - }, - "adjacency": { - "type": "array", - "items": { - "type": "object", - "properties": { - "source": { - "$ref": "#/$defs/selectorOrString" - }, - "target": { - "$ref": "#/$defs/selectorOrString" - }, - "pattern": { - "type": "string", - "enum": [ - "mesh", - "one_to_one" - ] - }, - "link_count": { - "type": "integer", - "minimum": 1 - }, - "link_params": { - "type": "object", - "properties": { - "capacity": { - "type": "number" - }, - "cost": { - "type": "number" - }, - "disabled": { - "type": "boolean" - }, - "risk_groups": { - "type": "array", - "items": { - "type": "string" - } - }, - "attrs": { - "type": "object" - } - }, - "additionalProperties": false - }, - "expand_vars": { - "type": "object", - "description": "Variable substitutions using $var or ${var} syntax", - "additionalProperties": { - "type": "array", - "items": {} - } - }, - "expansion_mode": { - "type": "string", - "enum": [ - "cartesian", - "zip" - ], - "description": "How to combine expand_vars lists" - } - }, - "required": [ - "source", - "target" - ], - "additionalProperties": false - } + "additionalProperties": { + "type": "object", + "properties": { + "nodes": { + "type": "object", + "description": "Node definitions within the blueprint", + "additionalProperties": { + "$ref": "#/$defs/nodeDefinition" } }, - "additionalProperties": false - } - }, - "additionalProperties": false + "links": { + "type": "array", + "description": "Link definitions within the blueprint", + "items": { + "$ref": "#/$defs/linkDefinition" + } + } + }, + "additionalProperties": false + } }, "risk_groups": { "type": "array", @@ -574,10 +411,11 @@ "oneOf": [ { "type": "string", - "description": "String shorthand for simple risk group (equivalent to {name: 'GroupName'})" + "description": "String shorthand for simple risk group" }, { "type": "object", + "description": "Full risk group definition", "properties": { "name": { "type": "string", @@ -602,7 +440,7 @@ "type": "object", "description": "Policy-based membership rule for auto-assigning entities to this risk group", "properties": { - "entity_scope": { + "scope": { "type": "string", "enum": [ "node", @@ -611,66 +449,18 @@ ], "description": "Type of entities to match" }, + "path": { + "type": "string", + "description": "Regex filter on entity name" + }, "match": { - "type": "object", - "description": "Match specification", - "properties": { - "logic": { - "type": "string", - "enum": [ - "and", - "or" - ], - "description": "How to combine conditions (defaults to 'and' for membership rules)" - }, - "conditions": { - "type": "array", - "description": "List of conditions to match", - "items": { - "type": "object", - "properties": { - "attr": { - "type": "string", - "description": "Attribute name (supports dot-notation for nested attrs)" - }, - 
"operator": { - "type": "string", - "enum": [ - "==", - "!=", - ">", - "<", - ">=", - "<=", - "contains", - "not_contains", - "in", - "not_in", - "any_value", - "no_value" - ], - "description": "Comparison operator" - }, - "value": { - "description": "Value to compare against" - } - }, - "required": [ - "attr", - "operator" - ] - } - } - }, - "required": [ - "conditions" - ] + "$ref": "#/$defs/matchSpec" } }, "required": [ - "entity_scope", - "match" - ] + "scope" + ], + "additionalProperties": false } }, "required": [ @@ -686,32 +476,37 @@ "type": "object", "description": "Generate risk groups from unique attribute values", "properties": { - "entity_scope": { + "scope": { "type": "string", "enum": [ "node", "link" ], - "description": "Type of entities to group (node or link only, not risk_group)" + "description": "Type of entities to group (node or link only)" }, "group_by": { "type": "string", "description": "Attribute name to group by (supports dot-notation)" }, - "name_template": { + "name": { "type": "string", "description": "Template for generated group names. Use ${value} as placeholder" }, + "path": { + "type": "string", + "description": "Regex filter on entity name" + }, "attrs": { "type": "object", "description": "Static attributes for generated groups" } }, "required": [ - "entity_scope", + "scope", "group_by", - "name_template" - ] + "name" + ], + "additionalProperties": false } }, "required": [ @@ -722,250 +517,223 @@ ] } }, - "failure_policy_set": { + "failures": { "type": "object", "description": "Named failure policies for simulation", - "patternProperties": { - "^[a-zA-Z0-9_-]+$": { - "type": "object", - "properties": { - "attrs": { + "additionalProperties": { + "type": "object", + "properties": { + "attrs": { + "type": "object", + "description": "Policy metadata" + }, + "expand_groups": { + "type": "boolean", + "description": "Whether to fail risk groups" + }, + "expand_children": { + "type": "boolean", + "description": "Whether to recursively fail risk group children" + }, + "modes": { + "type": "array", + "description": "Weighted mode list; exactly one mode is chosen per iteration.", + "items": { "type": "object", - "description": "Policy metadata" - }, - "fail_risk_groups": { - "type": "boolean", - "description": "Whether to fail risk groups" - }, - "fail_risk_group_children": { - "type": "boolean", - "description": "Whether to recursively fail risk group children" - }, - "modes": { - "type": "array", - "description": "Weighted mode list; exactly one mode is chosen per iteration.", - "items": { - "type": "object", - "properties": { - "weight": { - "type": "number", - "minimum": 0 - }, - "attrs": { - "type": "object" - }, - "rules": { - "type": "array", - "items": { - "type": "object", - "properties": { - "entity_scope": { - "type": "string", - "enum": [ - "node", - "link", - "risk_group" - ], - "description": "What entities this rule applies to" - }, - "conditions": { - "type": "array", - "description": "Conditions that must be met", - "items": { - "$ref": "#/$defs/condition" - } - }, - "logic": { - "type": "string", - "enum": [ - "and", - "or" - ], - "description": "Logic for combining conditions" - }, - "rule_type": { - "type": "string", - "enum": [ - "all", - "choice", - "random" - ], - "description": "How to apply the rule" - }, - "probability": { - "type": "number", - "minimum": 0, - "maximum": 1, - "description": "Probability for random rule type" - }, - "count": { - "type": "integer", - "minimum": 1, - "description": "Number of entities to affect for choice rule 
type" - }, - "weight_by": { - "type": "string", - "description": "Attribute name used for weighted sampling in choice mode" - } + "properties": { + "weight": { + "type": "number", + "minimum": 0 + }, + "attrs": { + "type": "object" + }, + "rules": { + "type": "array", + "items": { + "type": "object", + "properties": { + "scope": { + "type": "string", + "enum": [ + "node", + "link", + "risk_group" + ], + "description": "What entities this rule applies to" + }, + "path": { + "type": "string", + "description": "Regex filter on entity name" + }, + "match": { + "$ref": "#/$defs/matchSpec" + }, + "mode": { + "type": "string", + "enum": [ + "all", + "choice", + "random" + ], + "description": "How to apply the rule" + }, + "probability": { + "type": "number", + "minimum": 0, + "maximum": 1, + "description": "Probability for random mode" }, - "additionalProperties": false - } + "count": { + "type": "integer", + "minimum": 1, + "description": "Number of entities to affect for choice mode" + }, + "weight_by": { + "type": "string", + "description": "Attribute name used for weighted sampling in choice mode" + } + }, + "required": [ + "scope" + ], + "additionalProperties": false } - }, - "required": [ - "weight", - "rules" - ], - "additionalProperties": false - } - } - }, - "additionalProperties": false - } - }, - "additionalProperties": false - }, - "traffic_matrix_set": { - "type": "object", - "description": "Named traffic demand matrices", - "patternProperties": { - "^[a-zA-Z0-9_-]+$": { - "type": "array", - "description": "List of traffic demands", - "items": { - "type": "object", - "properties": { - "source": { - "$ref": "#/$defs/selectorOrString", - "description": "Source node selector" - }, - "sink": { - "$ref": "#/$defs/selectorOrString", - "description": "Sink node selector" - }, - "demand": { - "type": "number", - "description": "Traffic demand amount" - }, - "priority": { - "type": "integer", - "description": "Priority class" - }, - "demand_placed": { - "type": "number", - "description": "Pre-placed demand amount" - }, - "mode": { - "type": "string", - "enum": [ - "combine", - "pairwise" - ], - "description": "Expansion mode" - }, - "group_mode": { - "type": "string", - "enum": [ - "flatten", - "per_group", - "group_pairwise" - ], - "description": "How grouped nodes produce demands" - }, - "expand_vars": { - "type": "object", - "description": "Variable substitutions using $var or ${var} syntax", - "additionalProperties": { - "type": "array" } }, - "expansion_mode": { - "type": "string", - "enum": [ - "cartesian", - "zip" - ] - }, - "flow_policy_config": { - "description": "Routing policy config", - "oneOf": [ - { - "type": "string" - }, - { - "type": "integer" - }, - { - "type": "object" - }, - { - "type": "null" - } - ] - }, - "flow_policy": { - "type": "object", - "description": "Inline FlowPolicy definition" - }, - "attrs": { - "type": "object", - "description": "Additional demand attributes" - } - }, - "additionalProperties": false + "required": [ + "weight", + "rules" + ], + "additionalProperties": false + } } - } - }, - "additionalProperties": false + }, + "additionalProperties": false + } }, - "components": { + "demands": { "type": "object", - "description": "Hardware component library", - "patternProperties": { - "^[a-zA-Z0-9_\\-\\+]+$": { + "description": "Named traffic demand sets", + "additionalProperties": { + "type": "array", + "description": "List of traffic demands", + "items": { "type": "object", "properties": { - "component_type": { - "type": "string" + "source": { + "$ref": 
"#/$defs/selectorOrString", + "description": "Source node selector" }, - "description": { - "type": "string" + "target": { + "$ref": "#/$defs/selectorOrString", + "description": "Target node selector" }, - "capex": { - "type": "number" + "volume": { + "type": "number", + "description": "Traffic demand volume" }, - "power_watts": { - "type": "number" + "priority": { + "type": "integer", + "description": "Priority class" }, - "power_watts_max": { - "type": "number" + "demand_placed": { + "type": "number", + "description": "Pre-placed demand volume" }, - "capacity": { - "type": "number" + "mode": { + "type": "string", + "enum": [ + "combine", + "pairwise" + ], + "description": "Expansion mode for source/target pairs" }, - "ports": { - "type": "integer" + "group_mode": { + "type": "string", + "enum": [ + "flatten", + "per_group", + "group_pairwise" + ], + "description": "How grouped nodes produce demands" }, - "count": { - "type": "integer" + "expand": { + "$ref": "#/$defs/expandBlock" }, - "attrs": { - "type": "object" + "flow_policy": { + "description": "Routing policy configuration (preset name or inline object)", + "oneOf": [ + { + "type": "string" + }, + { + "type": "integer" + }, + { + "type": "object" + }, + { + "type": "null" + } + ] }, - "children": { + "attrs": { "type": "object", - "patternProperties": { - "^[a-zA-Z0-9_\\-\\+]+$": { - "type": "object", - "additionalProperties": true - } - } + "description": "Additional demand attributes" } }, + "required": [ + "source", + "target" + ], "additionalProperties": false } - }, - "additionalProperties": false + } + }, + "components": { + "type": "object", + "description": "Hardware component library (preserved as-is)", + "additionalProperties": { + "type": "object", + "properties": { + "component_type": { + "type": "string" + }, + "description": { + "type": "string" + }, + "capex": { + "type": "number" + }, + "power_watts": { + "type": "number" + }, + "power_watts_max": { + "type": "number" + }, + "capacity": { + "type": "number" + }, + "ports": { + "type": "integer" + }, + "count": { + "type": "integer" + }, + "attrs": { + "type": "object" + }, + "children": { + "type": "object", + "additionalProperties": { + "type": "object", + "additionalProperties": true + } + } + }, + "additionalProperties": false + } }, "workflow": { "type": "array", @@ -973,9 +741,9 @@ "items": { "type": "object", "properties": { - "step_type": { + "type": { "type": "string", - "description": "Type of workflow step" + "description": "Type of workflow step (BuildGraph, NetworkStats, MaxFlow, TrafficMatrixPlacement, MaximumSupportedDemand, CostPower)" }, "name": { "type": "string", @@ -985,13 +753,24 @@ "$ref": "#/$defs/selectorOrString", "description": "Source node selector" }, - "sink": { + "target": { "$ref": "#/$defs/selectorOrString", - "description": "Sink node selector" + "description": "Target node selector" + }, + "demand_set": { + "type": "string", + "description": "Name of demand set to use" + }, + "failure_policy": { + "type": [ + "string", + "null" + ], + "description": "Name of failure policy to use (optional)" } }, "required": [ - "step_type" + "type" ], "additionalProperties": true } diff --git a/ngraph/types/dto.py b/ngraph/types/dto.py index 69ede0c..fbf6e16 100644 --- a/ngraph/types/dto.py +++ b/ngraph/types/dto.py @@ -1,6 +1,6 @@ """Types and data structures for algorithm analytics. -Defines immutable summary containers and aliases for algorithm outputs. +Defines immutable summary containers for algorithm outputs. 
""" from __future__ import annotations diff --git a/ngraph/workflow/base.py b/ngraph/workflow/base.py index 79ee552..f687ab4 100644 --- a/ngraph/workflow/base.py +++ b/ngraph/workflow/base.py @@ -1,8 +1,9 @@ """Base classes for workflow automation. Defines the workflow step abstraction, registration decorator, and execution -wrapper that adds timing and logging. Steps implement `run()` and are executed -via `execute()` which records metadata and re-raises failures. +lifecycle. Steps implement `run()` and are executed via `execute()` which +handles timing, logging, and metadata recording. Failures are logged and +re-raised. """ from __future__ import annotations @@ -67,7 +68,7 @@ class WorkflowStep(ABC): YAML Configuration: ```yaml workflow: - - step_type: + - type: name: "optional_step_name" # Optional: Custom name for this step instance seed: 42 # Optional: Seed for reproducible random operations # ... step-specific parameters ... diff --git a/ngraph/workflow/build_graph.py b/ngraph/workflow/build_graph.py index 5c11407..8ea9217 100644 --- a/ngraph/workflow/build_graph.py +++ b/ngraph/workflow/build_graph.py @@ -8,7 +8,7 @@ YAML Configuration Example: ```yaml workflow: - - step_type: BuildGraph + - type: BuildGraph name: "build_network_graph" # Optional: Custom name for this step add_reverse: true # Optional: Add reverse edges (default: true) ``` @@ -104,7 +104,6 @@ def run(self, scenario: Scenario) -> None: ) # Convert to node-link format for serialization - # Use edges="edges" for forward compatibility with NetworkX 3.6+ graph_dict = nx.node_link_data(graph, edges="edges") scenario.results.put( diff --git a/ngraph/workflow/cost_power.py b/ngraph/workflow/cost_power.py index 94dab81..6b7e85e 100644 --- a/ngraph/workflow/cost_power.py +++ b/ngraph/workflow/cost_power.py @@ -21,7 +21,7 @@ YAML Configuration Example: ```yaml workflow: - - step_type: CostPower + - type: CostPower name: "cost_power" # Optional custom name include_disabled: false # Default: only enabled nodes/links aggregation_level: 2 # Produce levels: 0, 1, 2 diff --git a/ngraph/workflow/max_flow_step.py b/ngraph/workflow/max_flow_step.py index 7f3f5d3..2d4e8b6 100644 --- a/ngraph/workflow/max_flow_step.py +++ b/ngraph/workflow/max_flow_step.py @@ -9,10 +9,10 @@ YAML Configuration Example: workflow: - - step_type: MaxFlow + - type: MaxFlow name: "maxflow_dc_to_edge" source: "^datacenter/.*" - sink: "^edge/.*" + target: "^edge/.*" mode: "combine" failure_policy: "random_failures" iterations: 100 @@ -59,7 +59,7 @@ class MaxFlow(WorkflowStep): Attributes: source: Source node selector (string path or selector dict). - sink: Sink node selector (string path or selector dict). + target: Target node selector (string path or selector dict). mode: Flow analysis mode ("combine" or "pairwise"). failure_policy: Name of failure policy in scenario.failure_policy_set. iterations: Number of failure iterations to run. 
@@ -75,7 +75,7 @@ class MaxFlow(WorkflowStep): """ source: Union[str, Dict[str, Any]] = "" - sink: Union[str, Dict[str, Any]] = "" + target: Union[str, Dict[str, Any]] = "" mode: str = "combine" failure_policy: str | None = None iterations: int = 1 @@ -106,10 +106,10 @@ def run(self, scenario: "Scenario") -> None: t0 = time.perf_counter() logger.info("Starting MaxFlow: name=%s", self.name) logger.debug( - "MaxFlow params: source=%s sink=%s mode=%s failure_iters=%d parallelism=%s " + "MaxFlow params: source=%s target=%s mode=%s failure_iters=%d parallelism=%s " "failure_policy=%s include_flow_details=%s include_min_cut=%s", self.source, - self.sink, + self.target, self.mode, self.iterations, self.parallelism, @@ -126,7 +126,7 @@ def run(self, scenario: "Scenario") -> None: effective_parallelism = resolve_parallelism(self.parallelism) raw = fm.run_max_flow_monte_carlo( source=self.source, - sink=self.sink, + target=self.target, mode=self.mode, iterations=self.iterations, parallelism=effective_parallelism, @@ -162,7 +162,7 @@ def run(self, scenario: "Scenario") -> None: context = { "source": self.source, - "sink": self.sink, + "target": self.target, "mode": self.mode, "shortest_path": bool(self.shortest_path), "require_capacity": bool(self.require_capacity), diff --git a/ngraph/workflow/maximum_supported_demand_step.py b/ngraph/workflow/maximum_supported_demand_step.py index 291f7c5..a3d3cdc 100644 --- a/ngraph/workflow/maximum_supported_demand_step.py +++ b/ngraph/workflow/maximum_supported_demand_step.py @@ -1,7 +1,7 @@ -"""Maximum Supported Demand (MSD) workflow step. +"""MaximumSupportedDemand workflow step. Searches for the maximum uniform traffic multiplier `alpha_star` that is fully -placeable for a given matrix. Stores results under `data` as: +placeable for a given demand set. Stores results under `data` as: - `alpha_star`: float - `context`: parameters used for the search @@ -10,6 +10,18 @@ Performance: AnalysisContext is built once at search start and reused across all binary search probes. Only demand volumes change per probe. + +YAML Configuration Example: + ```yaml + workflow: + - type: MaximumSupportedDemand + name: "msd_search" + demand_set: "default" + resolution: 0.01 # Convergence threshold + max_bisect_iters: 50 # Maximum bisection iterations + alpha_start: 1.0 # Starting multiplier + growth_factor: 2.0 # Bracket expansion factor + ``` """ from __future__ import annotations @@ -55,7 +67,26 @@ class _MSDCache: @dataclass class MaximumSupportedDemand(WorkflowStep): - matrix_name: str = "default" + """Finds the maximum uniform traffic multiplier that is fully placeable. + + Uses binary search to find alpha_star, the maximum multiplier for all + demands in the set that can still be fully placed on the network. + + Attributes: + demand_set: Name of the demand set to analyze. + acceptance_rule: Currently only "hard" is implemented. + alpha_start: Starting multiplier for binary search. + growth_factor: Factor for bracket expansion. + alpha_min: Minimum allowed alpha value. + alpha_max: Maximum allowed alpha value. + resolution: Convergence threshold for binary search. + max_bracket_iters: Maximum iterations for bracketing phase. + max_bisect_iters: Maximum iterations for bisection phase. + seeds_per_alpha: Number of placement attempts per alpha probe. + placement_rounds: Placement optimization rounds. 
+ """ + + demand_set: str = "default" acceptance_rule: str = "hard" alpha_start: float = 1.0 growth_factor: float = 2.0 @@ -93,9 +124,9 @@ def run(self, scenario: "Any") -> None: t0 = time.perf_counter() logger.info("Starting MaximumSupportedDemand: name=%s", self.name) logger.debug( - "MaximumSupportedDemand params: matrix=%s alpha_start=%.6g " + "MaximumSupportedDemand params: demand_set=%s alpha_start=%.6g " "growth=%.3f seeds=%d resolution=%.6g", - self.matrix_name, + self.demand_set, float(self.alpha_start), float(self.growth_factor), int(self.seeds_per_alpha), @@ -105,17 +136,17 @@ def run(self, scenario: "Any") -> None: # Serialize base demands for result output from ngraph.model.flow.policy_config import serialize_policy_preset - base_tds = scenario.traffic_matrix_set.get_matrix(self.matrix_name) + base_tds = scenario.demand_set.get_set(self.demand_set) base_demands: list[dict[str, Any]] = [ { "id": getattr(td, "id", None), "source": getattr(td, "source", ""), - "sink": getattr(td, "sink", ""), - "demand": float(getattr(td, "demand", 0.0)), + "target": getattr(td, "target", ""), + "volume": float(getattr(td, "volume", 0.0)), "mode": getattr(td, "mode", "pairwise"), "priority": int(getattr(td, "priority", 0)), - "flow_policy_config": serialize_policy_preset( - getattr(td, "flow_policy_config", None) + "flow_policy": serialize_policy_preset( + getattr(td, "flow_policy", None) ), } for td in base_tds @@ -123,12 +154,12 @@ def run(self, scenario: "Any") -> None: if not base_demands: raise ValueError( - f"Traffic matrix '{self.matrix_name}' contains no demands. " + f"Demand set '{self.demand_set}' contains no demands. " "Cannot compute maximum supported demand without traffic specifications." ) # Build cache once for all probes - cache = self._build_cache(scenario, self.matrix_name) + cache = self._build_cache(scenario, self.demand_set) logger.debug( "MSD cache built: %d expanded demands", len(cache.base_expanded), @@ -155,7 +186,7 @@ def probe(alpha: float) -> tuple[bool, dict[str, Any]]: "max_bracket_iters": self.max_bracket_iters, "max_bisect_iters": self.max_bisect_iters, "seeds_per_alpha": self.seeds_per_alpha, - "matrix_name": self.matrix_name, + "demand_set": self.demand_set, "placement_rounds": self.placement_rounds, } scenario.results.put("metadata", {}) @@ -230,7 +261,7 @@ def _binary_search(self, probe: "Any") -> float: return left @staticmethod - def _build_cache(scenario: Any, matrix_name: str) -> _MSDCache: + def _build_cache(scenario: Any, demand_set_name: str) -> _MSDCache: """Build cache for MSD binary search. 
Creates stable TrafficDemand objects, expands them once, and builds @@ -238,21 +269,19 @@ def _build_cache(scenario: Any, matrix_name: str) -> _MSDCache: """ from ngraph.analysis import AnalysisContext - base_tds = scenario.traffic_matrix_set.get_matrix(matrix_name) + base_tds = scenario.demand_set.get_set(demand_set_name) # Create stable TrafficDemand objects (same IDs for all probes) stable_demands: list[TrafficDemand] = [ TrafficDemand( id=getattr(td, "id", "") or "", source=getattr(td, "source", ""), - sink=getattr(td, "sink", ""), + target=getattr(td, "target", ""), priority=int(getattr(td, "priority", 0)), - demand=float(getattr(td, "demand", 0.0)), - flow_policy_config=getattr(td, "flow_policy_config", None), + volume=float(getattr(td, "volume", 0.0)), + flow_policy=getattr(td, "flow_policy", None), mode=str(getattr(td, "mode", "pairwise")), group_mode=str(getattr(td, "group_mode", "flatten")), - expand_vars=getattr(td, "expand_vars", None) or {}, - expansion_mode=str(getattr(td, "expansion_mode", "cartesian")), ) for td in base_tds ] @@ -345,20 +374,18 @@ def _build_scaled_demands( """Build scaled TrafficDemand objects from serialized demands. Utility for tests to verify results at specific alpha values. - Preserves ID if present for context caching compatibility. + Preserves ID if present for stable context caching. """ return [ TrafficDemand( id=d.get("id") or "", source=d["source"], - sink=d["sink"], + target=d["target"], priority=int(d["priority"]), - demand=float(d["demand"]) * alpha, - flow_policy_config=d.get("flow_policy_config"), + volume=float(d["volume"]) * alpha, + flow_policy=d.get("flow_policy"), mode=str(d.get("mode", "pairwise")), group_mode=str(d.get("group_mode", "flatten")), - expand_vars=d.get("expand_vars") or {}, - expansion_mode=str(d.get("expansion_mode", "cartesian")), ) for d in base_demands ] diff --git a/ngraph/workflow/network_stats.py b/ngraph/workflow/network_stats.py index 70b3a63..058bea9 100644 --- a/ngraph/workflow/network_stats.py +++ b/ngraph/workflow/network_stats.py @@ -7,7 +7,7 @@ YAML Configuration Example: ```yaml workflow: - - step_type: NetworkStats + - type: NetworkStats name: "network_statistics" # Optional: Custom name for this step include_disabled: false # Include disabled nodes/links in stats excluded_nodes: ["node1", "node2"] # Optional: Temporary node exclusions diff --git a/ngraph/workflow/parse.py b/ngraph/workflow/parse.py index d324b0a..023d47a 100644 --- a/ngraph/workflow/parse.py +++ b/ngraph/workflow/parse.py @@ -22,7 +22,7 @@ def build_workflow_steps( """Instantiate workflow steps from normalized dictionaries. Args: - workflow_data: List of step dicts; each must have "step_type". + workflow_data: List of step dicts; each must have "type". derive_seed: Callable that takes a step name and returns a seed or None. Returns: @@ -35,18 +35,18 @@ def build_workflow_steps( assigned_names: set[str] = set() for step_index, step_info in enumerate(workflow_data): - step_type = step_info.get("step_type") + step_type = step_info.get("type") if not step_type: raise ValueError( - "Each workflow entry must have a 'step_type' field " + "Each workflow entry must have a 'type' field " "indicating the WorkflowStep subclass to use." 
) step_cls = WORKFLOW_STEP_REGISTRY.get(step_type) if not step_cls: - raise ValueError(f"Unrecognized 'step_type': {step_type}") + raise ValueError(f"Unrecognized step 'type': {step_type}") - ctor_args = {k: v for k, v in step_info.items() if k != "step_type"} + ctor_args = {k: v for k, v in step_info.items() if k != "type"} normalized_ctor_args = normalize_yaml_dict_keys(ctor_args) raw_name = normalized_ctor_args.get("name") diff --git a/ngraph/workflow/traffic_matrix_placement_step.py b/ngraph/workflow/traffic_matrix_placement_step.py index 1109159..9d7f4e1 100644 --- a/ngraph/workflow/traffic_matrix_placement_step.py +++ b/ngraph/workflow/traffic_matrix_placement_step.py @@ -1,10 +1,23 @@ """TrafficMatrixPlacement workflow step. -Runs Monte Carlo demand placement using a named traffic matrix and produces +Runs Monte Carlo demand placement using a named demand set and produces unified `flow_results` per iteration under `data.flow_results`. Baseline (no failures) is always run first as a separate reference. The `iterations` parameter specifies how many failure scenarios to run. + +YAML Configuration Example: + ```yaml + workflow: + - type: TrafficMatrixPlacement + name: "tm_analysis" + demand_set: "default" + failure_policy: "single_link" # Optional: failure policy name + iterations: 100 # Number of failure scenarios + parallelism: 4 # Worker processes (or "auto") + alpha: 1.0 # Demand volume multiplier + include_flow_details: true # Include cost distribution per flow + ``` """ from __future__ import annotations @@ -30,7 +43,7 @@ @dataclass class TrafficMatrixPlacement(WorkflowStep): - """Monte Carlo demand placement using a named traffic matrix. + """Monte Carlo demand placement using a named demand set. Baseline (no failures) is always run first as a separate reference. Results are returned with baseline in a separate field. The flow_results list contains unique @@ -38,8 +51,8 @@ class TrafficMatrixPlacement(WorkflowStep): many iterations matched that pattern. Attributes: - matrix_name: Name of the traffic matrix to analyze. - failure_policy: Optional policy name in scenario.failure_policy_set. + demand_set: Name of the demand set to analyze. + failure_policy: Optional failure policy name in scenario.failure_policy_set. iterations: Number of failure iterations to run. parallelism: Number of parallel worker processes. placement_rounds: Placement optimization rounds (int or "auto"). @@ -47,12 +60,12 @@ class TrafficMatrixPlacement(WorkflowStep): store_failure_patterns: Whether to store failure pattern results. include_flow_details: When True, include cost_distribution per flow. include_used_edges: When True, include set of used edges per demand in entry data. - alpha: Numeric scale for demands in the matrix. + alpha: Numeric scale for demands in the set. alpha_from_step: Optional producer step name to read alpha from. alpha_from_field: Dotted field path in producer step (default: "data.alpha_star"). 
""" - matrix_name: str = "" + demand_set: str = "" failure_policy: str | None = None iterations: int = 1 parallelism: int | str = "auto" @@ -78,15 +91,15 @@ def __post_init__(self) -> None: raise ValueError("alpha must be > 0.0") def run(self, scenario: "Scenario") -> None: - if not self.matrix_name: - raise ValueError("'matrix_name' is required for TrafficMatrixPlacement") + if not self.demand_set: + raise ValueError("'demand_set' is required for TrafficMatrixPlacement") t0 = time.perf_counter() logger.info("Starting TrafficMatrixPlacement: name=%s", self.name) logger.debug( - "TrafficMatrixPlacement params: matrix_name=%s failure_iters=%d " + "TrafficMatrixPlacement params: demand_set=%s failure_iters=%d " "parallelism=%s placement_rounds=%s failure_policy=%s alpha=%s", - self.matrix_name, + self.demand_set, self.iterations, self.parallelism, self.placement_rounds, @@ -94,12 +107,12 @@ def run(self, scenario: "Scenario") -> None: self.alpha, ) - # Extract and serialize traffic matrix + # Extract and serialize demand set try: - td_list = scenario.traffic_matrix_set.get_matrix(self.matrix_name) + td_list = scenario.demand_set.get_set(self.demand_set) except KeyError as exc: raise ValueError( - f"Traffic matrix '{self.matrix_name}' not found in scenario." + f"Demand set '{self.demand_set}' not found in scenario." ) from exc from ngraph.model.flow.policy_config import serialize_policy_preset @@ -122,30 +135,26 @@ def run(self, scenario: "Scenario") -> None: { "id": td.id, "source": td.source, - "sink": td.sink, - "demand": float(td.demand) * float(effective_alpha), + "target": td.target, + "volume": float(td.volume) * float(effective_alpha), "mode": getattr(td, "mode", "pairwise"), - "flow_policy_config": getattr(td, "flow_policy_config", None), + "flow_policy": getattr(td, "flow_policy", None), "priority": getattr(td, "priority", 0), "group_mode": getattr(td, "group_mode", "flatten"), - "expand_vars": getattr(td, "expand_vars", None) or {}, - "expansion_mode": getattr(td, "expansion_mode", "cartesian"), } ) base_demands.append( { "id": td.id, "source": getattr(td, "source", ""), - "sink": getattr(td, "sink", ""), - "demand": float(getattr(td, "demand", 0.0)), + "target": getattr(td, "target", ""), + "volume": float(getattr(td, "volume", 0.0)), "mode": getattr(td, "mode", "pairwise"), "priority": int(getattr(td, "priority", 0)), - "flow_policy_config": serialize_policy_preset( - getattr(td, "flow_policy_config", None) + "flow_policy": serialize_policy_preset( + getattr(td, "flow_policy", None) ), "group_mode": getattr(td, "group_mode", "flatten"), - "expand_vars": getattr(td, "expand_vars", None) or {}, - "expansion_mode": getattr(td, "expansion_mode", "cartesian"), } ) @@ -205,7 +214,7 @@ def run(self, scenario: "Scenario") -> None: "baseline": baseline_dict, "flow_results": flow_results, "context": { - "matrix_name": self.matrix_name, + "demand_set": self.demand_set, "placement_rounds": self.placement_rounds, "include_flow_details": self.include_flow_details, "include_used_edges": self.include_used_edges, diff --git a/pyproject.toml b/pyproject.toml index 2df487f..d76c53d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" # --------------------------------------------------------------------- [project] name = "ngraph" -version = "0.16.0" +version = "0.17.0" description = "A tool and a library for network modeling and analysis." 
readme = "README.md" authors = [{ name = "Andrey Golovanov" }] diff --git a/scenarios/backbone_clos.yml b/scenarios/backbone_clos.yml index d7bb191..c7751ad 100644 --- a/scenarios/backbone_clos.yml +++ b/scenarios/backbone_clos.yml @@ -1,10 +1,10 @@ seed: 42 blueprints: Clos_L16_S4: - groups: + nodes: spine: - node_count: 4 - name_template: spine{node_num} + count: 4 + template: spine{n} attrs: role: spine tier: spine @@ -12,49 +12,48 @@ blueprints: component: SpineRouter count: 1 leaf: - node_count: 16 - name_template: leaf{node_num} + count: 16 + template: leaf{n} attrs: role: leaf tier: leaf hardware: component: LeafRouter count: 1 - adjacency: + links: - source: /leaf target: /spine pattern: mesh - link_params: - capacity: 3200 - cost: 1 - attrs: - link_type: leaf_spine - hardware: - source: - component: 800G-DR4 - count: 4.0 - target: - component: 1600G-2xDR4 - count: 2.0 + capacity: 3200 + cost: 1 + attrs: + link_type: leaf_spine + hardware: + source: + component: 800G-DR4 + count: 4.0 + target: + component: 1600G-2xDR4 + count: 2.0 DCRegion: - groups: + nodes: dc: - node_count: 1 - name_template: dc + count: 1 + template: dc attrs: role: dc - adjacency: [] + links: [] SingleRouter: - groups: + nodes: core: - node_count: 1 - name_template: core + count: 1 + template: core attrs: role: core hardware: component: CoreRouter count: 1 - adjacency: [] + links: [] components: 1600G-2xDR4: component_type: optic @@ -114,9 +113,9 @@ components: attrs: role: spine network: - groups: + nodes: metro1/pop[1-2]: - use_blueprint: Clos_L16_S4 + blueprint: Clos_L16_S4 attrs: metro_name: new-york-jersey-city-newark metro_name_orig: New York--Jersey City--Newark, NY--NJ @@ -126,7 +125,7 @@ network: radius_km: 51.75 node_type: pop metro1/dc[1-1]: - use_blueprint: DCRegion + blueprint: DCRegion attrs: metro_name: new-york-jersey-city-newark metro_name_orig: New York--Jersey City--Newark, NY--NJ @@ -138,7 +137,7 @@ network: mw_per_dc_region: 150.0 gbps_per_mw: 200.0 metro2/pop[1-2]: - use_blueprint: Clos_L16_S4 + blueprint: Clos_L16_S4 attrs: metro_name: washington-arlington metro_name_orig: Washington--Arlington, DC--VA--MD @@ -148,7 +147,7 @@ network: radius_km: 32.67 node_type: pop metro2/dc[1-1]: - use_blueprint: DCRegion + blueprint: DCRegion attrs: metro_name: washington-arlington metro_name_orig: Washington--Arlington, DC--VA--MD @@ -160,7 +159,7 @@ network: mw_per_dc_region: 150.0 gbps_per_mw: 200.0 metro3/pop[1-2]: - use_blueprint: SingleRouter + blueprint: SingleRouter attrs: metro_name: chicago metro_name_orig: Chicago, IL--IN @@ -170,17 +169,17 @@ network: radius_km: 43.9 node_type: pop metro4/pop[1-2]: - use_blueprint: SingleRouter + blueprint: SingleRouter attrs: metro_name: atlanta metro_name_orig: Atlanta, GA - metro_id: 03817 + metro_id: "03817" location_x: 1056698.7 location_y: 1255491.7 radius_km: 44.95 node_type: pop metro5/pop[1-2]: - use_blueprint: Clos_L16_S4 + blueprint: Clos_L16_S4 attrs: metro_name: columbus metro_name_orig: Columbus, OH @@ -190,7 +189,7 @@ network: radius_km: 20.63 node_type: pop metro5/dc[1-1]: - use_blueprint: DCRegion + blueprint: DCRegion attrs: metro_name: columbus metro_name_orig: Columbus, OH @@ -201,141 +200,137 @@ network: node_type: dc_region mw_per_dc_region: 150.0 gbps_per_mw: 200.0 - adjacency: + links: - source: path: metro1/pop1 match: &id001 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: leaf target: path: metro1/pop2 match: *id001 pattern: one_to_one - link_params: - capacity: 1400.0 - 
cost: 163 - attrs: - link_type: intra_metro - source_metro: new-york-jersey-city-newark - target_metro: new-york-jersey-city-newark - target_capacity: 22400.0 - site_edge: metro1/pop1|metro1/pop2|intra_metro:new-york-jersey-city-newark:1-2 - adjacency_id: intra_metro:new-york-jersey-city-newark - distance_km: 163 - hardware: - source: - component: 800G-ZR+ - count: 2.0 - target: - component: 800G-ZR+ - count: 2.0 + capacity: 1400.0 + cost: 163 + attrs: + link_type: intra_metro + source_metro: new-york-jersey-city-newark + target_metro: new-york-jersey-city-newark + target_capacity: 22400.0 + site_edge: metro1/pop1|metro1/pop2|intra_metro:new-york-jersey-city-newark:1-2 + adjacency_id: intra_metro:new-york-jersey-city-newark + distance_km: 163 + hardware: + source: + component: 800G-ZR+ + count: 2.0 + target: + component: 800G-ZR+ + count: 2.0 - source: path: metro1/pop1 match: &id002 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: dc - attr: role - operator: == + op: == value: leaf logic: or target: path: metro1/dc1 match: *id002 pattern: one_to_one - link_params: - capacity: 2000.0 - cost: 1 - attrs: - link_type: dc_to_pop - source_metro: new-york-jersey-city-newark - target_metro: new-york-jersey-city-newark - target_capacity: 32000.0 - site_edge: metro1/pop1|metro1/dc1|dc_to_pop:new-york-jersey-city-newark:1-1 - adjacency_id: dc_to_pop:new-york-jersey-city-newark - distance_km: 1 - hardware: - source: - component: 800G-ZR+ - count: 3.0 - target: - component: 800G-ZR+ - count: 3.0 + capacity: 2000.0 + cost: 1 + attrs: + link_type: dc_to_pop + source_metro: new-york-jersey-city-newark + target_metro: new-york-jersey-city-newark + target_capacity: 32000.0 + site_edge: metro1/pop1|metro1/dc1|dc_to_pop:new-york-jersey-city-newark:1-1 + adjacency_id: dc_to_pop:new-york-jersey-city-newark + distance_km: 1 + hardware: + source: + component: 800G-ZR+ + count: 3.0 + target: + component: 800G-ZR+ + count: 3.0 - source: path: metro1/pop1 match: &id003 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: leaf logic: or target: path: metro2/pop1 match: *id003 pattern: one_to_one - link_params: - capacity: 800.0 - cost: 412 - attrs: - link_type: inter_metro_corridor - source_metro: new-york-jersey-city-newark - target_metro: washington-arlington - target_capacity: 12800.0 - site_edge: metro1/pop1|metro2/pop1|inter_metro:1-2:1-1 - adjacency_id: inter_metro:1-2 - distance_km: 412 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 + capacity: 800.0 + cost: 412 + attrs: + link_type: inter_metro_corridor + source_metro: new-york-jersey-city-newark + target_metro: washington-arlington + target_capacity: 12800.0 + site_edge: metro1/pop1|metro2/pop1|inter_metro:1-2:1-1 + adjacency_id: inter_metro:1-2 + distance_km: 412 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 - source: path: metro1/pop1 match: &id004 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: leaf logic: or target: path: metro5/pop1 match: *id004 pattern: one_to_one - link_params: - capacity: 800.0 - cost: 977 - attrs: - link_type: inter_metro_corridor - source_metro: new-york-jersey-city-newark - target_metro: columbus - target_capacity: 12800.0 - site_edge: metro1/pop1|metro5/pop1|inter_metro:1-5:1-1 - adjacency_id: inter_metro:1-5 - distance_km: 977 - hardware: - source: - 
component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 + capacity: 800.0 + cost: 977 + attrs: + link_type: inter_metro_corridor + source_metro: new-york-jersey-city-newark + target_metro: columbus + target_capacity: 12800.0 + site_edge: metro1/pop1|metro5/pop1|inter_metro:1-5:1-1 + adjacency_id: inter_metro:1-5 + distance_km: 977 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 - source: path: metro1/pop2 match: *id002 @@ -343,25 +338,23 @@ network: path: metro1/dc1 match: *id002 pattern: one_to_one - link_params: - capacity: 2000.0 - cost: 163 - attrs: - link_type: dc_to_pop - source_metro: new-york-jersey-city-newark - target_metro: new-york-jersey-city-newark - target_capacity: 32000.0 - site_edge: metro1/pop2|metro1/dc1|dc_to_pop:new-york-jersey-city-newark:1-2 - adjacency_id: dc_to_pop:new-york-jersey-city-newark - distance_km: 163 - hardware: - source: - component: 800G-ZR+ - count: 3.0 - target: - component: 800G-ZR+ - count: 3.0 - # Inter-metro corridor connectivity (backbone links between metros) + capacity: 2000.0 + cost: 163 + attrs: + link_type: dc_to_pop + source_metro: new-york-jersey-city-newark + target_metro: new-york-jersey-city-newark + target_capacity: 32000.0 + site_edge: metro1/pop2|metro1/dc1|dc_to_pop:new-york-jersey-city-newark:1-2 + adjacency_id: dc_to_pop:new-york-jersey-city-newark + distance_km: 163 + hardware: + source: + component: 800G-ZR+ + count: 3.0 + target: + component: 800G-ZR+ + count: 3.0 - source: path: metro1/pop2 match: *id003 @@ -369,24 +362,23 @@ network: path: metro2/pop2 match: *id003 pattern: one_to_one - link_params: - capacity: 800.0 - cost: 412 - attrs: - link_type: inter_metro_corridor - source_metro: new-york-jersey-city-newark - target_metro: washington-arlington - target_capacity: 12800.0 - site_edge: metro1/pop2|metro2/pop2|inter_metro:1-2:2-2 - adjacency_id: inter_metro:1-2 - distance_km: 412 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 + capacity: 800.0 + cost: 412 + attrs: + link_type: inter_metro_corridor + source_metro: new-york-jersey-city-newark + target_metro: washington-arlington + target_capacity: 12800.0 + site_edge: metro1/pop2|metro2/pop2|inter_metro:1-2:2-2 + adjacency_id: inter_metro:1-2 + distance_km: 412 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 - source: path: metro1/pop2 match: *id004 @@ -394,740 +386,717 @@ network: path: metro5/pop2 match: *id004 pattern: one_to_one - link_params: - capacity: 800.0 - cost: 977 - attrs: - link_type: inter_metro_corridor - source_metro: new-york-jersey-city-newark - target_metro: columbus - target_capacity: 12800.0 - site_edge: metro1/pop2|metro5/pop2|inter_metro:1-5:2-2 - adjacency_id: inter_metro:1-5 - distance_km: 977 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 + capacity: 800.0 + cost: 977 + attrs: + link_type: inter_metro_corridor + source_metro: new-york-jersey-city-newark + target_metro: columbus + target_capacity: 12800.0 + site_edge: metro1/pop2|metro5/pop2|inter_metro:1-5:2-2 + adjacency_id: inter_metro:1-5 + distance_km: 977 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 - source: path: metro2/pop1 - match: &id007 + match: &id005 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: leaf target: path: metro2/pop2 - match: 
*id007 + match: *id005 pattern: one_to_one - link_params: - capacity: 1600.0 - cost: 103 - attrs: - link_type: intra_metro - source_metro: washington-arlington - target_metro: washington-arlington - target_capacity: 25600.0 - site_edge: metro2/pop1|metro2/pop2|intra_metro:washington-arlington:1-2 - adjacency_id: intra_metro:washington-arlington - distance_km: 103 - hardware: - source: - component: 800G-ZR+ - count: 2.0 - target: - component: 800G-ZR+ - count: 2.0 + capacity: 1600.0 + cost: 103 + attrs: + link_type: intra_metro + source_metro: washington-arlington + target_metro: washington-arlington + target_capacity: 25600.0 + site_edge: metro2/pop1|metro2/pop2|intra_metro:washington-arlington:1-2 + adjacency_id: intra_metro:washington-arlington + distance_km: 103 + hardware: + source: + component: 800G-ZR+ + count: 2.0 + target: + component: 800G-ZR+ + count: 2.0 - source: path: metro2/pop1 - match: &id008 + match: &id006 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: dc - attr: role - operator: == + op: == value: leaf logic: or target: path: metro2/dc1 - match: *id008 + match: *id006 pattern: one_to_one - link_params: - capacity: 2400.0 - cost: 1 - attrs: - link_type: dc_to_pop - source_metro: washington-arlington - target_metro: washington-arlington - target_capacity: 38400.0 - site_edge: metro2/pop1|metro2/dc1|dc_to_pop:washington-arlington:1-1 - adjacency_id: dc_to_pop:washington-arlington - distance_km: 1 - hardware: - source: - component: 800G-ZR+ - count: 3.0 - target: - component: 800G-ZR+ - count: 3.0 + capacity: 2400.0 + cost: 1 + attrs: + link_type: dc_to_pop + source_metro: washington-arlington + target_metro: washington-arlington + target_capacity: 38400.0 + site_edge: metro2/pop1|metro2/dc1|dc_to_pop:washington-arlington:1-1 + adjacency_id: dc_to_pop:washington-arlington + distance_km: 1 + hardware: + source: + component: 800G-ZR+ + count: 3.0 + target: + component: 800G-ZR+ + count: 3.0 - source: path: metro2/pop1 - match: &id009 + match: &id007 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: leaf logic: or target: path: metro3/pop1 - match: *id009 + match: *id007 pattern: one_to_one - link_params: - capacity: 200.0 - cost: 1261 - attrs: - link_type: inter_metro_corridor - source_metro: washington-arlington - target_metro: chicago - target_capacity: 3200.0 - site_edge: metro2/pop1|metro3/pop1|inter_metro:2-3:1-1 - adjacency_id: inter_metro:2-3 - distance_km: 1261 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 + capacity: 200.0 + cost: 1261 + attrs: + link_type: inter_metro_corridor + source_metro: washington-arlington + target_metro: chicago + target_capacity: 3200.0 + site_edge: metro2/pop1|metro3/pop1|inter_metro:2-3:1-1 + adjacency_id: inter_metro:2-3 + distance_km: 1261 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 - source: path: metro2/pop1 - match: &id010 + match: &id008 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: leaf logic: or target: path: metro4/pop1 - match: *id010 + match: *id008 pattern: one_to_one - link_params: - capacity: 200.0 - cost: 1216 - attrs: - link_type: inter_metro_corridor - source_metro: washington-arlington - target_metro: atlanta - target_capacity: 3200.0 - site_edge: metro2/pop1|metro4/pop1|inter_metro:2-4:1-1 - adjacency_id: inter_metro:2-4 - distance_km: 1216 
- hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 + capacity: 200.0 + cost: 1216 + attrs: + link_type: inter_metro_corridor + source_metro: washington-arlington + target_metro: atlanta + target_capacity: 3200.0 + site_edge: metro2/pop1|metro4/pop1|inter_metro:2-4:1-1 + adjacency_id: inter_metro:2-4 + distance_km: 1216 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 - source: path: metro2/pop1 - match: &id011 + match: &id009 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: leaf logic: or target: path: metro5/pop1 - match: *id011 + match: *id009 pattern: one_to_one - link_params: - capacity: 800.0 - cost: 733 - attrs: - link_type: inter_metro_corridor - source_metro: washington-arlington - target_metro: columbus - target_capacity: 12800.0 - site_edge: metro2/pop1|metro5/pop1|inter_metro:2-5:1-1 - adjacency_id: inter_metro:2-5 - distance_km: 733 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 + capacity: 800.0 + cost: 733 + attrs: + link_type: inter_metro_corridor + source_metro: washington-arlington + target_metro: columbus + target_capacity: 12800.0 + site_edge: metro2/pop1|metro5/pop1|inter_metro:2-5:1-1 + adjacency_id: inter_metro:2-5 + distance_km: 733 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 - source: path: metro2/pop2 - match: *id008 + match: *id006 target: path: metro2/dc1 - match: *id008 + match: *id006 pattern: one_to_one - link_params: - capacity: 2400.0 - cost: 103 - attrs: - link_type: dc_to_pop - source_metro: washington-arlington - target_metro: washington-arlington - target_capacity: 38400.0 - site_edge: metro2/pop2|metro2/dc1|dc_to_pop:washington-arlington:1-2 - adjacency_id: dc_to_pop:washington-arlington - distance_km: 103 - hardware: - source: - component: 800G-ZR+ - count: 3.0 - target: - component: 800G-ZR+ - count: 3.0 + capacity: 2400.0 + cost: 103 + attrs: + link_type: dc_to_pop + source_metro: washington-arlington + target_metro: washington-arlington + target_capacity: 38400.0 + site_edge: metro2/pop2|metro2/dc1|dc_to_pop:washington-arlington:1-2 + adjacency_id: dc_to_pop:washington-arlington + distance_km: 103 + hardware: + source: + component: 800G-ZR+ + count: 3.0 + target: + component: 800G-ZR+ + count: 3.0 - source: path: metro2/pop2 - match: *id009 + match: *id007 target: path: metro3/pop2 - match: *id009 + match: *id007 pattern: one_to_one - link_params: - capacity: 200.0 - cost: 1261 - attrs: - link_type: inter_metro_corridor - source_metro: washington-arlington - target_metro: chicago - target_capacity: 3200.0 - site_edge: metro2/pop2|metro3/pop2|inter_metro:2-3:2-2 - adjacency_id: inter_metro:2-3 - distance_km: 1261 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 + capacity: 200.0 + cost: 1261 + attrs: + link_type: inter_metro_corridor + source_metro: washington-arlington + target_metro: chicago + target_capacity: 3200.0 + site_edge: metro2/pop2|metro3/pop2|inter_metro:2-3:2-2 + adjacency_id: inter_metro:2-3 + distance_km: 1261 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 - source: path: metro2/pop2 - match: *id010 + match: *id008 target: path: metro4/pop2 - match: *id010 + match: *id008 pattern: one_to_one - link_params: - capacity: 200.0 - cost: 1216 - attrs: - link_type: 
inter_metro_corridor - source_metro: washington-arlington - target_metro: atlanta - target_capacity: 3200.0 - site_edge: metro2/pop2|metro4/pop2|inter_metro:2-4:2-2 - adjacency_id: inter_metro:2-4 - distance_km: 1216 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 + capacity: 200.0 + cost: 1216 + attrs: + link_type: inter_metro_corridor + source_metro: washington-arlington + target_metro: atlanta + target_capacity: 3200.0 + site_edge: metro2/pop2|metro4/pop2|inter_metro:2-4:2-2 + adjacency_id: inter_metro:2-4 + distance_km: 1216 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 - source: path: metro2/pop2 - match: *id011 + match: *id009 target: path: metro5/pop2 - match: *id011 + match: *id009 pattern: one_to_one - link_params: - capacity: 800.0 - cost: 733 - attrs: - link_type: inter_metro_corridor - source_metro: washington-arlington - target_metro: columbus - target_capacity: 12800.0 - site_edge: metro2/pop2|metro5/pop2|inter_metro:2-5:2-2 - adjacency_id: inter_metro:2-5 - distance_km: 733 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 + capacity: 800.0 + cost: 733 + attrs: + link_type: inter_metro_corridor + source_metro: washington-arlington + target_metro: columbus + target_capacity: 12800.0 + site_edge: metro2/pop2|metro5/pop2|inter_metro:2-5:2-2 + adjacency_id: inter_metro:2-5 + distance_km: 733 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 - source: path: metro3/pop1 - match: &id015 + match: &id010 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: leaf target: path: metro3/pop2 - match: *id015 + match: *id010 pattern: one_to_one - link_params: - capacity: 12800.0 - cost: 138 - attrs: - link_type: intra_metro - source_metro: chicago - target_metro: chicago - target_capacity: 12800.0 - site_edge: metro3/pop1|metro3/pop2|intra_metro:chicago:1-2 - adjacency_id: intra_metro:chicago - distance_km: 138 - hardware: - source: - component: 800G-ZR+ - count: 16.0 - target: - component: 800G-ZR+ - count: 16.0 + capacity: 12800.0 + cost: 138 + attrs: + link_type: intra_metro + source_metro: chicago + target_metro: chicago + target_capacity: 12800.0 + site_edge: metro3/pop1|metro3/pop2|intra_metro:chicago:1-2 + adjacency_id: intra_metro:chicago + distance_km: 138 + hardware: + source: + component: 800G-ZR+ + count: 16.0 + target: + component: 800G-ZR+ + count: 16.0 - source: path: metro3/pop1 - match: &id016 + match: &id011 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: leaf logic: or target: path: metro5/pop1 - match: *id016 + match: *id011 pattern: one_to_one - link_params: - capacity: 200.0 - cost: 658 - attrs: - link_type: inter_metro_corridor - source_metro: chicago - target_metro: columbus - target_capacity: 3200.0 - site_edge: metro3/pop1|metro5/pop1|inter_metro:3-5:1-1 - adjacency_id: inter_metro:3-5 - distance_km: 658 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 + capacity: 200.0 + cost: 658 + attrs: + link_type: inter_metro_corridor + source_metro: chicago + target_metro: columbus + target_capacity: 3200.0 + site_edge: metro3/pop1|metro5/pop1|inter_metro:3-5:1-1 + adjacency_id: inter_metro:3-5 + distance_km: 658 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + 
count: 1.0 - source: path: metro3/pop1 - match: &id017 + match: &id012 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: leaf logic: or target: path: metro4/pop1 - match: *id017 + match: *id012 pattern: one_to_one - link_params: - capacity: 3200.0 - cost: 1302 - attrs: - link_type: inter_metro_corridor - source_metro: chicago - target_metro: atlanta - target_capacity: 3200.0 - site_edge: metro3/pop1|metro4/pop1|inter_metro:3-4:1-1 - adjacency_id: inter_metro:3-4 - distance_km: 1302 - hardware: - source: - component: 800G-ZR+ - count: 4.0 - target: - component: 800G-ZR+ - count: 4.0 + capacity: 3200.0 + cost: 1302 + attrs: + link_type: inter_metro_corridor + source_metro: chicago + target_metro: atlanta + target_capacity: 3200.0 + site_edge: metro3/pop1|metro4/pop1|inter_metro:3-4:1-1 + adjacency_id: inter_metro:3-4 + distance_km: 1302 + hardware: + source: + component: 800G-ZR+ + count: 4.0 + target: + component: 800G-ZR+ + count: 4.0 - source: path: metro3/pop2 - match: *id016 + match: *id011 target: path: metro5/pop2 - match: *id016 + match: *id011 pattern: one_to_one - link_params: - capacity: 200.0 - cost: 658 - attrs: - link_type: inter_metro_corridor - source_metro: chicago - target_metro: columbus - target_capacity: 3200.0 - site_edge: metro3/pop2|metro5/pop2|inter_metro:3-5:2-2 - adjacency_id: inter_metro:3-5 - distance_km: 658 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 + capacity: 200.0 + cost: 658 + attrs: + link_type: inter_metro_corridor + source_metro: chicago + target_metro: columbus + target_capacity: 3200.0 + site_edge: metro3/pop2|metro5/pop2|inter_metro:3-5:2-2 + adjacency_id: inter_metro:3-5 + distance_km: 658 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 - source: path: metro3/pop2 - match: *id017 + match: *id012 target: path: metro4/pop2 - match: *id017 + match: *id012 pattern: one_to_one - link_params: - capacity: 3200.0 - cost: 1302 - attrs: - link_type: inter_metro_corridor - source_metro: chicago - target_metro: atlanta - target_capacity: 3200.0 - site_edge: metro3/pop2|metro4/pop2|inter_metro:3-4:2-2 - adjacency_id: inter_metro:3-4 - distance_km: 1302 - hardware: - source: - component: 800G-ZR+ - count: 4.0 - target: - component: 800G-ZR+ - count: 4.0 + capacity: 3200.0 + cost: 1302 + attrs: + link_type: inter_metro_corridor + source_metro: chicago + target_metro: atlanta + target_capacity: 3200.0 + site_edge: metro3/pop2|metro4/pop2|inter_metro:3-4:2-2 + adjacency_id: inter_metro:3-4 + distance_km: 1302 + hardware: + source: + component: 800G-ZR+ + count: 4.0 + target: + component: 800G-ZR+ + count: 4.0 - source: path: metro4/pop1 - match: &id020 + match: &id013 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: leaf target: path: metro4/pop2 - match: *id020 + match: *id013 pattern: one_to_one - link_params: - capacity: 12800.0 - cost: 142 - attrs: - link_type: intra_metro - source_metro: atlanta - target_metro: atlanta - target_capacity: 12800.0 - site_edge: metro4/pop1|metro4/pop2|intra_metro:atlanta:1-2 - adjacency_id: intra_metro:atlanta - distance_km: 142 - hardware: - source: - component: 800G-ZR+ - count: 16.0 - target: - component: 800G-ZR+ - count: 16.0 + capacity: 12800.0 + cost: 142 + attrs: + link_type: intra_metro + source_metro: atlanta + target_metro: atlanta + target_capacity: 12800.0 + site_edge: 
metro4/pop1|metro4/pop2|intra_metro:atlanta:1-2 + adjacency_id: intra_metro:atlanta + distance_km: 142 + hardware: + source: + component: 800G-ZR+ + count: 16.0 + target: + component: 800G-ZR+ + count: 16.0 - source: path: metro4/pop1 - match: &id021 + match: &id014 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: leaf logic: or target: path: metro5/pop1 - match: *id021 + match: *id014 pattern: one_to_one - link_params: - capacity: 200.0 - cost: 1028 - attrs: - link_type: inter_metro_corridor - source_metro: atlanta - target_metro: columbus - target_capacity: 3200.0 - site_edge: metro4/pop1|metro5/pop1|inter_metro:4-5:1-1 - adjacency_id: inter_metro:4-5 - distance_km: 1028 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 + capacity: 200.0 + cost: 1028 + attrs: + link_type: inter_metro_corridor + source_metro: atlanta + target_metro: columbus + target_capacity: 3200.0 + site_edge: metro4/pop1|metro5/pop1|inter_metro:4-5:1-1 + adjacency_id: inter_metro:4-5 + distance_km: 1028 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 - source: path: metro4/pop2 - match: *id021 + match: *id014 target: path: metro5/pop2 - match: *id021 + match: *id014 pattern: one_to_one - link_params: - capacity: 200.0 - cost: 1028 - attrs: - link_type: inter_metro_corridor - source_metro: atlanta - target_metro: columbus - target_capacity: 3200.0 - site_edge: metro4/pop2|metro5/pop2|inter_metro:4-5:2-2 - adjacency_id: inter_metro:4-5 - distance_km: 1028 - hardware: - source: - component: 800G-ZR+ - count: 1.0 - target: - component: 800G-ZR+ - count: 1.0 + capacity: 200.0 + cost: 1028 + attrs: + link_type: inter_metro_corridor + source_metro: atlanta + target_metro: columbus + target_capacity: 3200.0 + site_edge: metro4/pop2|metro5/pop2|inter_metro:4-5:2-2 + adjacency_id: inter_metro:4-5 + distance_km: 1028 + hardware: + source: + component: 800G-ZR+ + count: 1.0 + target: + component: 800G-ZR+ + count: 1.0 - source: path: metro5/pop1 - match: &id023 + match: &id015 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: leaf target: path: metro5/pop2 - match: *id023 + match: *id015 pattern: one_to_one - link_params: - capacity: 1600.0 - cost: 65 - attrs: - link_type: intra_metro - source_metro: columbus - target_metro: columbus - target_capacity: 25600.0 - site_edge: metro5/pop1|metro5/pop2|intra_metro:columbus:1-2 - adjacency_id: intra_metro:columbus - distance_km: 65 - hardware: - source: - component: 800G-ZR+ - count: 2.0 - target: - component: 800G-ZR+ - count: 2.0 + capacity: 1600.0 + cost: 65 + attrs: + link_type: intra_metro + source_metro: columbus + target_metro: columbus + target_capacity: 25600.0 + site_edge: metro5/pop1|metro5/pop2|intra_metro:columbus:1-2 + adjacency_id: intra_metro:columbus + distance_km: 65 + hardware: + source: + component: 800G-ZR+ + count: 2.0 + target: + component: 800G-ZR+ + count: 2.0 - source: path: metro5/pop1 - match: &id024 + match: &id016 conditions: - attr: role - operator: == + op: == value: core - attr: role - operator: == + op: == value: dc - attr: role - operator: == + op: == value: leaf logic: or target: path: metro5/dc1 - match: *id024 + match: *id016 pattern: one_to_one - link_params: - capacity: 2400.0 - cost: 1 - attrs: - link_type: dc_to_pop - source_metro: columbus - target_metro: columbus - target_capacity: 38400.0 - site_edge: 
metro5/pop1|metro5/dc1|dc_to_pop:columbus:1-1 - adjacency_id: dc_to_pop:columbus - distance_km: 1 - hardware: - source: - component: 800G-ZR+ - count: 3.0 - target: - component: 800G-ZR+ - count: 3.0 + capacity: 2400.0 + cost: 1 + attrs: + link_type: dc_to_pop + source_metro: columbus + target_metro: columbus + target_capacity: 38400.0 + site_edge: metro5/pop1|metro5/dc1|dc_to_pop:columbus:1-1 + adjacency_id: dc_to_pop:columbus + distance_km: 1 + hardware: + source: + component: 800G-ZR+ + count: 3.0 + target: + component: 800G-ZR+ + count: 3.0 - source: path: metro5/pop2 - match: *id024 + match: *id016 target: path: metro5/dc1 - match: *id024 + match: *id016 pattern: one_to_one - link_params: - capacity: 2400.0 - cost: 65 - attrs: - link_type: dc_to_pop - source_metro: columbus - target_metro: columbus - target_capacity: 38400.0 - site_edge: metro5/pop2|metro5/dc1|dc_to_pop:columbus:1-2 - adjacency_id: dc_to_pop:columbus - distance_km: 65 - hardware: - source: - component: 800G-ZR+ - count: 3.0 - target: - component: 800G-ZR+ - count: 3.0 + capacity: 2400.0 + cost: 65 + attrs: + link_type: dc_to_pop + source_metro: columbus + target_metro: columbus + target_capacity: 38400.0 + site_edge: metro5/pop2|metro5/dc1|dc_to_pop:columbus:1-2 + adjacency_id: dc_to_pop:columbus + distance_km: 65 + hardware: + source: + component: 800G-ZR+ + count: 3.0 + target: + component: 800G-ZR+ + count: 3.0 risk_groups: - # Auto-generate corridor risk groups from link adjacency_id attribute - # Creates one group per unique adjacency_id value (inter_metro, intra_metro, dc_to_pop) - generate: - entity_scope: link + scope: link group_by: adjacency_id - name_template: "corridor_risk_${value}" + name: corridor_risk_${value} attrs: type: corridor_risk generated: true -failure_policy_set: +failures: weighted_modes: attrs: description: "Balanced MC: SRLG + DC->PoP + node(maint) + intra-site fabric" modes: - weight: 0.3 rules: - - entity_scope: risk_group - rule_type: choice + - scope: risk_group + mode: choice count: 1 weight_by: distance_km - weight: 0.35 rules: - - entity_scope: link - rule_type: choice + - scope: link + mode: choice count: 3 conditions: - attr: link_type - operator: == + op: == value: dc_to_pop logic: and weight_by: target_capacity - weight: 0.25 rules: - - entity_scope: node - rule_type: choice + - scope: node + mode: choice count: 1 conditions: - attr: node_type - operator: "!=" + op: "!=" value: dc_region logic: and weight_by: attached_capacity_gbps - weight: 0.1 rules: - - entity_scope: link - rule_type: choice + - scope: link + mode: choice count: 4 conditions: - attr: link_type - operator: == + op: == value: leaf_spine - attr: link_type - operator: == + op: == value: intra_group - attr: link_type - operator: == + op: == value: inter_group - attr: link_type - operator: == + op: == value: internal_mesh logic: or -traffic_matrix_set: +demands: baseline_traffic_matrix: - source: ^metro1/dc1/.* - sink: ^metro2/dc1/.* + target: ^metro2/dc1/.* mode: pairwise priority: 0 - demand: 15000.0 + volume: 15000.0 attrs: euclidean_km: 330 - flow_policy_config: TE_WCMP_UNLIM + flow_policy: TE_WCMP_UNLIM - source: ^metro2/dc1/.* - sink: ^metro1/dc1/.* + target: ^metro1/dc1/.* mode: pairwise priority: 0 - demand: 15000.0 + volume: 15000.0 attrs: euclidean_km: 330 - flow_policy_config: TE_WCMP_UNLIM + flow_policy: TE_WCMP_UNLIM - source: ^metro1/dc1/.* - sink: ^metro5/dc1/.* + target: ^metro5/dc1/.* mode: pairwise priority: 0 - demand: 15000.0 + volume: 15000.0 attrs: euclidean_km: 748 - flow_policy_config: 
TE_WCMP_UNLIM + flow_policy: TE_WCMP_UNLIM - source: ^metro5/dc1/.* - sink: ^metro1/dc1/.* + target: ^metro1/dc1/.* mode: pairwise priority: 0 - demand: 15000.0 + volume: 15000.0 attrs: euclidean_km: 748 - flow_policy_config: TE_WCMP_UNLIM + flow_policy: TE_WCMP_UNLIM - source: ^metro2/dc1/.* - sink: ^metro5/dc1/.* + target: ^metro5/dc1/.* mode: pairwise priority: 0 - demand: 15000.0 + volume: 15000.0 attrs: euclidean_km: 508 - flow_policy_config: TE_WCMP_UNLIM + flow_policy: TE_WCMP_UNLIM - source: ^metro5/dc1/.* - sink: ^metro2/dc1/.* + target: ^metro2/dc1/.* mode: pairwise priority: 0 - demand: 15000.0 + volume: 15000.0 attrs: euclidean_km: 508 - flow_policy_config: TE_WCMP_UNLIM + flow_policy: TE_WCMP_UNLIM workflow: - - step_type: NetworkStats + - type: NetworkStats name: network_statistics - - step_type: MaximumSupportedDemand + - type: MaximumSupportedDemand name: msd_baseline - matrix_name: baseline_traffic_matrix + demand_set: baseline_traffic_matrix acceptance_rule: hard alpha_start: 1.0 growth_factor: 2.0 @@ -1138,10 +1107,10 @@ workflow: max_bisect_iters: 32 seeds_per_alpha: 1 placement_rounds: 2 - - step_type: TrafficMatrixPlacement + - type: TrafficMatrixPlacement name: tm_placement seed: 42 - matrix_name: baseline_traffic_matrix + demand_set: baseline_traffic_matrix failure_policy: weighted_modes iterations: 1000 parallelism: 7 @@ -1151,7 +1120,7 @@ workflow: include_used_edges: false alpha_from_step: msd_baseline alpha_from_field: data.alpha_star - - step_type: CostPower + - type: CostPower name: cost_power include_disabled: true aggregation_level: 2 diff --git a/scenarios/nsfnet.yaml b/scenarios/nsfnet.yaml index e435f44..9ccea0d 100644 --- a/scenarios/nsfnet.yaml +++ b/scenarios/nsfnet.yaml @@ -1,716 +1,583 @@ -# NSFNET T3 (1992) topology scenario -# ref: Merit "NSFNET: A Partnership for High-Speed Networking" https://www.merit.edu/wp-content/uploads/2024/10/Merit-Network_NSFNET-A-Partnership-for-High-Speed-Networking.pdf -# ref: NANOG handy.node.list, 22 May 1992 https://mailman.nanog.org/pipermail/nanog/1992-May/108697.html -# -# ------------------------------------------------------------------------------ -# Model notes -# -# • `site_type: core` - CNSS POPs built with IBM RS/6000-based routers and -# multiple T3 interface cards. These sites form the nationwide DS-3 -# (44.736 Mb/s) backbone that entered full production in 1992. -# -# • `site_type: edge` - ENSS gateways and the two "additional sites served" -# (Cambridge MA & NASA Ames). Each connects to one nearby CNSS via a -# single DS-3 spur and does not forward transit traffic. -# -# • Links - One record per physical DS-3 circuit. Capacities are expressed -# as `capacity: 45000.0`; latency-based IGRP costs follow 1992 ANS -# engineering notes. No parallel circuits are collapsed in this model. 
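The same DSL rename runs through every hunk in this change set. As a condensed crib, using entries taken verbatim from these hunks (a reading aid, not an exhaustive schema reference): match conditions likewise use `op:` in place of `operator:`, and link parameters move from a nested `link_params:` mapping to top-level keys on each link entry.

```yaml
# New spellings, with the removed names noted inline
failures:                          # was: failure_policy_set
  single_link_failure:
    modes:
      - weight: 1.0
        rules:
          - scope: link            # was: entity_scope
            mode: choice           # was: rule_type
            count: 1
demands:                           # was: traffic_matrix_set
  baseline_traffic_matrix:
    - source: ^metro1/dc1/.*
      target: ^metro2/dc1/.*       # was: sink
      volume: 15000.0              # was: demand
      flow_policy: TE_WCMP_UNLIM   # was: flow_policy_config
workflow:
  - type: MaximumSupportedDemand   # was: step_type
    name: msd_baseline
    demand_set: baseline_traffic_matrix   # was: matrix_name
```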
-# ------------------------------------------------------------------------------ seed: 5678 -############################################################################### -# Network Topology -############################################################################### network: - name: "NSFNET T3 (1992)" + name: NSFNET T3 (1992) version: 1.1 nodes: - # ----- CNSS core POPs -------------------------------------------------------- - Seattle: { attrs: { site_type: core } } - PaloAlto: { attrs: { site_type: core } } - LosAngeles: { attrs: { site_type: core } } - SaltLakeCity: { attrs: { site_type: core } } - Denver: { attrs: { site_type: core } } - Lincoln: { attrs: { site_type: core } } - StLouis: { attrs: { site_type: core } } - Chicago: { attrs: { site_type: core } } - Cleveland: { attrs: { site_type: core } } - NewYork: { attrs: { site_type: core } } - WashingtonDC: { attrs: { site_type: core } } - Greensboro: { attrs: { site_type: core } } - Atlanta: { attrs: { site_type: core } } - Houston: { attrs: { site_type: core } } - AnnArbor: { attrs: { site_type: core } } - Hartford: { attrs: { site_type: core } } - # ----- ENSS / super-computer & "additional" sites ----------------------- - Cambridge: { attrs: { site_type: edge } } # NEARnet - additional site - Argonne: { attrs: { site_type: edge } } # additional site - SanDiego: { attrs: { site_type: edge } } - Boulder: { attrs: { site_type: edge } } - Princeton: { attrs: { site_type: edge } } - Ithaca: { attrs: { site_type: edge } } - CollegePark: { attrs: { site_type: edge } } - Pittsburgh: { attrs: { site_type: edge } } - UrbanaChampaign: { attrs: { site_type: edge } } - MoffettField: { attrs: { site_type: edge } } # NASA Ames additional site - + Seattle: + attrs: + site_type: core + PaloAlto: + attrs: + site_type: core + LosAngeles: + attrs: + site_type: core + SaltLakeCity: + attrs: + site_type: core + Denver: + attrs: + site_type: core + Lincoln: + attrs: + site_type: core + StLouis: + attrs: + site_type: core + Chicago: + attrs: + site_type: core + Cleveland: + attrs: + site_type: core + NewYork: + attrs: + site_type: core + WashingtonDC: + attrs: + site_type: core + Greensboro: + attrs: + site_type: core + Atlanta: + attrs: + site_type: core + Houston: + attrs: + site_type: core + AnnArbor: + attrs: + site_type: core + Hartford: + attrs: + site_type: core + Cambridge: + attrs: + site_type: edge + Argonne: + attrs: + site_type: edge + SanDiego: + attrs: + site_type: edge + Boulder: + attrs: + site_type: edge + Princeton: + attrs: + site_type: edge + Ithaca: + attrs: + site_type: edge + CollegePark: + attrs: + site_type: edge + Pittsburgh: + attrs: + site_type: edge + UrbanaChampaign: + attrs: + site_type: edge + MoffettField: + attrs: + site_type: edge links: - # Northern arc - - { - source: NewYork, - target: Cleveland, - link_params: - { - capacity: 45000.0, - cost: 4, - risk_groups: [RG_Cleveland_NewYork], - attrs: { circuit: A }, - }, - } - - { - source: NewYork, - target: Cleveland, - link_params: - { - capacity: 45000.0, - cost: 4, - risk_groups: [RG_Cleveland_NewYork], - attrs: { circuit: B }, - }, - } - - { - source: Cleveland, - target: Chicago, - link_params: - { - capacity: 45000.0, - cost: 6, - risk_groups: [RG_Cleveland_Chicago], - attrs: { circuit: A }, - }, - } - - { - source: Cleveland, - target: Chicago, - link_params: - { - capacity: 45000.0, - cost: 6, - risk_groups: [RG_Cleveland_Chicago], - attrs: { circuit: B }, - }, - } - - { - source: Chicago, - target: PaloAlto, - link_params: - { - capacity: 45000.0, - 
cost: 12, - risk_groups: [RG_Chicago_PaloAlto], - attrs: { circuit: A }, - }, - } - - { - source: Chicago, - target: PaloAlto, - link_params: - { - capacity: 45000.0, - cost: 12, - risk_groups: [RG_Chicago_PaloAlto], - attrs: { circuit: B }, - }, - } - - # Southern arc - - { - source: NewYork, - target: WashingtonDC, - link_params: - { - capacity: 45000.0, - cost: 4, - risk_groups: [RG_NewYork_WashingtonDC], - attrs: { circuit: A }, - }, - } - - { - source: NewYork, - target: WashingtonDC, - link_params: - { - capacity: 45000.0, - cost: 4, - risk_groups: [RG_NewYork_WashingtonDC], - attrs: { circuit: B }, - }, - } - - { - source: WashingtonDC, - target: Greensboro, - link_params: - { - capacity: 45000.0, - cost: 5, - risk_groups: [RG_WashingtonDC_Greensboro], - attrs: { circuit: A }, - }, - } - - { - source: WashingtonDC, - target: Greensboro, - link_params: - { - capacity: 45000.0, - cost: 5, - risk_groups: [RG_WashingtonDC_Greensboro], - attrs: { circuit: B }, - }, - } - - { - source: Greensboro, - target: Atlanta, - link_params: - { - capacity: 45000.0, - cost: 7, - risk_groups: [RG_Greensboro_Atlanta], - attrs: { circuit: A }, - }, - } - - { - source: Greensboro, - target: Atlanta, - link_params: - { - capacity: 45000.0, - cost: 7, - risk_groups: [RG_Greensboro_Atlanta], - attrs: { circuit: B }, - }, - } - - { - source: Atlanta, - target: Houston, - link_params: - { - capacity: 45000.0, - cost: 10, - risk_groups: [RG_Atlanta_Houston], - attrs: { circuit: A }, - }, - } - - { - source: Atlanta, - target: Houston, - link_params: - { - capacity: 45000.0, - cost: 10, - risk_groups: [RG_Atlanta_Houston], - attrs: { circuit: B }, - }, - } - - { - source: Houston, - target: LosAngeles, - link_params: - { - capacity: 45000.0, - cost: 14, - risk_groups: [RG_Houston_LosAngeles], - attrs: { circuit: A }, - }, - } - - { - source: Houston, - target: LosAngeles, - link_params: - { - capacity: 45000.0, - cost: 14, - risk_groups: [RG_Houston_LosAngeles], - attrs: { circuit: B }, - }, - } - - { - source: LosAngeles, - target: PaloAlto, - link_params: - { - capacity: 45000.0, - cost: 8, - risk_groups: [RG_LosAngeles_PaloAlto], - attrs: { circuit: A }, - }, - } - - { - source: LosAngeles, - target: PaloAlto, - link_params: - { - capacity: 45000.0, - cost: 8, - risk_groups: [RG_LosAngeles_PaloAlto], - attrs: { circuit: B }, - }, - } - - # Pacific NW & Rockies - - { - source: Seattle, - target: PaloAlto, - link_params: - { - capacity: 45000.0, - cost: 9, - risk_groups: [RG_PaloAlto_Seattle], - attrs: { circuit: A }, - }, - } - - { - source: Seattle, - target: PaloAlto, - link_params: - { - capacity: 45000.0, - cost: 9, - risk_groups: [RG_PaloAlto_Seattle], - attrs: { circuit: B }, - }, - } - - { - source: Seattle, - target: SaltLakeCity, - link_params: - { - capacity: 45000.0, - cost: 10, - risk_groups: [RG_Seattle_SaltLakeCity], - attrs: { circuit: A }, - }, - } - - { - source: Seattle, - target: SaltLakeCity, - link_params: - { - capacity: 45000.0, - cost: 10, - risk_groups: [RG_Seattle_SaltLakeCity], - attrs: { circuit: B }, - }, - } - - { - source: SaltLakeCity, - target: Denver, - link_params: - { - capacity: 45000.0, - cost: 9, - risk_groups: [RG_SaltLakeCity_Denver], - attrs: { circuit: A }, - }, - } - - { - source: SaltLakeCity, - target: Denver, - link_params: - { - capacity: 45000.0, - cost: 9, - risk_groups: [RG_SaltLakeCity_Denver], - attrs: { circuit: B }, - }, - } - - { - source: Denver, - target: Lincoln, - link_params: - { - capacity: 45000.0, - cost: 8, - risk_groups: [RG_Denver_Lincoln], - 
attrs: { circuit: A }, - }, - } - - { - source: Denver, - target: Lincoln, - link_params: - { - capacity: 45000.0, - cost: 8, - risk_groups: [RG_Denver_Lincoln], - attrs: { circuit: B }, - }, - } - - { - source: Lincoln, - target: StLouis, - link_params: - { - capacity: 45000.0, - cost: 6, - risk_groups: [RG_Lincoln_StLouis], - attrs: { circuit: A }, - }, - } - - { - source: Lincoln, - target: StLouis, - link_params: - { - capacity: 45000.0, - cost: 6, - risk_groups: [RG_Lincoln_StLouis], - attrs: { circuit: B }, - }, - } - - { - source: StLouis, - target: Chicago, - link_params: - { - capacity: 45000.0, - cost: 5, - risk_groups: [RG_StLouis_Chicago], - attrs: { circuit: A }, - }, - } - - { - source: StLouis, - target: Chicago, - link_params: - { - capacity: 45000.0, - cost: 5, - risk_groups: [RG_StLouis_Chicago], - attrs: { circuit: B }, - }, - } - - # Midwest shortcuts - - { - source: Cleveland, - target: StLouis, - link_params: - { - capacity: 45000.0, - cost: 7, - risk_groups: [RG_Cleveland_StLouis], - attrs: { circuit: A }, - }, - } - - { - source: Cleveland, - target: StLouis, - link_params: - { - capacity: 45000.0, - cost: 7, - risk_groups: [RG_Cleveland_StLouis], - attrs: { circuit: B }, - }, - } - - { - source: Denver, - target: SaltLakeCity, - link_params: - { - capacity: 45000.0, - cost: 9, - risk_groups: [RG_Denver_SaltLakeCity], - attrs: { circuit: A }, - }, - } - - { - source: Denver, - target: SaltLakeCity, - link_params: - { - capacity: 45000.0, - cost: 9, - risk_groups: [RG_Denver_SaltLakeCity], - attrs: { circuit: B }, - }, - } - - # Great-Lakes loop - - { - source: Chicago, - target: AnnArbor, - link_params: - { - capacity: 45000.0, - cost: 5, - risk_groups: [RG_Chicago_AnnArbor], - attrs: { circuit: A }, - }, - } - - { - source: Chicago, - target: AnnArbor, - link_params: - { - capacity: 45000.0, - cost: 5, - risk_groups: [RG_Chicago_AnnArbor], - attrs: { circuit: B }, - }, - } - - { - source: AnnArbor, - target: Cleveland, - link_params: - { - capacity: 45000.0, - cost: 5, - risk_groups: [RG_AnnArbor_Cleveland], - attrs: { circuit: A }, - }, - } - - { - source: AnnArbor, - target: Cleveland, - link_params: - { - capacity: 45000.0, - cost: 5, - risk_groups: [RG_AnnArbor_Cleveland], - attrs: { circuit: B }, - }, - } - - # Hartford hub - - { - source: Hartford, - target: NewYork, - link_params: - { - capacity: 45000.0, - cost: 5, - risk_groups: [RG_Hartford_NewYork], - attrs: { circuit: A }, - }, - } - - { - source: Hartford, - target: NewYork, - link_params: - { - capacity: 45000.0, - cost: 5, - risk_groups: [RG_Hartford_NewYork], - attrs: { circuit: B }, - }, - } - - { - source: Hartford, - target: WashingtonDC, - link_params: - { - capacity: 45000.0, - cost: 5, - risk_groups: [RG_Hartford_WashingtonDC], - attrs: { circuit: A }, - }, - } - - { - source: Hartford, - target: WashingtonDC, - link_params: - { - capacity: 45000.0, - cost: 5, - risk_groups: [RG_Hartford_WashingtonDC], - attrs: { circuit: B }, - }, - } - - # Northeast spur - single circuits (no SRLG needed) - - { - source: Princeton, - target: Ithaca, - link_params: { capacity: 45000.0, cost: 5, attrs: { circuit: A } }, - } - - { - source: Princeton, - target: WashingtonDC, - link_params: { capacity: 45000.0, cost: 4, attrs: { circuit: A } }, - } - - { - source: CollegePark, - target: WashingtonDC, - link_params: { capacity: 45000.0, cost: 3, attrs: { circuit: A } }, - } - - { - source: CollegePark, - target: NewYork, - link_params: { capacity: 45000.0, cost: 6, attrs: { circuit: A } }, - } - - { - source: 
Cambridge, - target: NewYork, - link_params: { capacity: 45000.0, cost: 6, attrs: { circuit: A } }, - } - - # ENSS & "additional site" spurs - single circuits - - { - source: Argonne, - target: Chicago, - link_params: { capacity: 45000.0, cost: 4, attrs: { circuit: A } }, - } - - { - source: SanDiego, - target: LosAngeles, - link_params: { capacity: 45000.0, cost: 6, attrs: { circuit: A } }, - } - - { - source: Boulder, - target: Denver, - link_params: { capacity: 45000.0, cost: 4, attrs: { circuit: A } }, - } - - { - source: Pittsburgh, - target: Cleveland, - link_params: { capacity: 45000.0, cost: 4, attrs: { circuit: A } }, - } - - { - source: UrbanaChampaign, - target: Chicago, - link_params: { capacity: 45000.0, cost: 4, attrs: { circuit: A } }, - } - - { - source: MoffettField, - target: PaloAlto, - link_params: { capacity: 45000.0, cost: 6, attrs: { circuit: A } }, - } - -############################################################################### -# Shared-risk groups - one per span that carried parallel A- and B-circuits -############################################################################### + - source: NewYork + target: Cleveland + capacity: 45000.0 + cost: 4 + risk_groups: + - RG_Cleveland_NewYork + attrs: + circuit: A + - source: NewYork + target: Cleveland + capacity: 45000.0 + cost: 4 + risk_groups: + - RG_Cleveland_NewYork + attrs: + circuit: B + - source: Cleveland + target: Chicago + capacity: 45000.0 + cost: 6 + risk_groups: + - RG_Cleveland_Chicago + attrs: + circuit: A + - source: Cleveland + target: Chicago + capacity: 45000.0 + cost: 6 + risk_groups: + - RG_Cleveland_Chicago + attrs: + circuit: B + - source: Chicago + target: PaloAlto + capacity: 45000.0 + cost: 12 + risk_groups: + - RG_Chicago_PaloAlto + attrs: + circuit: A + - source: Chicago + target: PaloAlto + capacity: 45000.0 + cost: 12 + risk_groups: + - RG_Chicago_PaloAlto + attrs: + circuit: B + - source: NewYork + target: WashingtonDC + capacity: 45000.0 + cost: 4 + risk_groups: + - RG_NewYork_WashingtonDC + attrs: + circuit: A + - source: NewYork + target: WashingtonDC + capacity: 45000.0 + cost: 4 + risk_groups: + - RG_NewYork_WashingtonDC + attrs: + circuit: B + - source: WashingtonDC + target: Greensboro + capacity: 45000.0 + cost: 5 + risk_groups: + - RG_WashingtonDC_Greensboro + attrs: + circuit: A + - source: WashingtonDC + target: Greensboro + capacity: 45000.0 + cost: 5 + risk_groups: + - RG_WashingtonDC_Greensboro + attrs: + circuit: B + - source: Greensboro + target: Atlanta + capacity: 45000.0 + cost: 7 + risk_groups: + - RG_Greensboro_Atlanta + attrs: + circuit: A + - source: Greensboro + target: Atlanta + capacity: 45000.0 + cost: 7 + risk_groups: + - RG_Greensboro_Atlanta + attrs: + circuit: B + - source: Atlanta + target: Houston + capacity: 45000.0 + cost: 10 + risk_groups: + - RG_Atlanta_Houston + attrs: + circuit: A + - source: Atlanta + target: Houston + capacity: 45000.0 + cost: 10 + risk_groups: + - RG_Atlanta_Houston + attrs: + circuit: B + - source: Houston + target: LosAngeles + capacity: 45000.0 + cost: 14 + risk_groups: + - RG_Houston_LosAngeles + attrs: + circuit: A + - source: Houston + target: LosAngeles + capacity: 45000.0 + cost: 14 + risk_groups: + - RG_Houston_LosAngeles + attrs: + circuit: B + - source: LosAngeles + target: PaloAlto + capacity: 45000.0 + cost: 8 + risk_groups: + - RG_LosAngeles_PaloAlto + attrs: + circuit: A + - source: LosAngeles + target: PaloAlto + capacity: 45000.0 + cost: 8 + risk_groups: + - RG_LosAngeles_PaloAlto + attrs: + circuit: B + 
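The banner removed just above documents the SRLG convention that survives the migration: each span that carried parallel A and B circuits gets exactly one shared risk group, so a single span failure (a fiber cut, for example) removes both circuits at once. In the new flattened link schema, copied verbatim from this diff, a paired span looks like:

```yaml
# Both circuits of the NewYork-Cleveland span share one SRLG; failing
# RG_Cleveland_NewYork takes circuits A and B down together.
- source: NewYork
  target: Cleveland
  capacity: 45000.0
  cost: 4
  risk_groups:
    - RG_Cleveland_NewYork
  attrs:
    circuit: A
- source: NewYork
  target: Cleveland
  capacity: 45000.0
  cost: 4
  risk_groups:
    - RG_Cleveland_NewYork
  attrs:
    circuit: B
```

Single-circuit ENSS spurs need no shared risk group, which is why the Princeton, Cambridge, and similar spur links omit the `risk_groups` key.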
- source: Seattle + target: PaloAlto + capacity: 45000.0 + cost: 9 + risk_groups: + - RG_PaloAlto_Seattle + attrs: + circuit: A + - source: Seattle + target: PaloAlto + capacity: 45000.0 + cost: 9 + risk_groups: + - RG_PaloAlto_Seattle + attrs: + circuit: B + - source: Seattle + target: SaltLakeCity + capacity: 45000.0 + cost: 10 + risk_groups: + - RG_Seattle_SaltLakeCity + attrs: + circuit: A + - source: Seattle + target: SaltLakeCity + capacity: 45000.0 + cost: 10 + risk_groups: + - RG_Seattle_SaltLakeCity + attrs: + circuit: B + - source: SaltLakeCity + target: Denver + capacity: 45000.0 + cost: 9 + risk_groups: + - RG_SaltLakeCity_Denver + attrs: + circuit: A + - source: SaltLakeCity + target: Denver + capacity: 45000.0 + cost: 9 + risk_groups: + - RG_SaltLakeCity_Denver + attrs: + circuit: B + - source: Denver + target: Lincoln + capacity: 45000.0 + cost: 8 + risk_groups: + - RG_Denver_Lincoln + attrs: + circuit: A + - source: Denver + target: Lincoln + capacity: 45000.0 + cost: 8 + risk_groups: + - RG_Denver_Lincoln + attrs: + circuit: B + - source: Lincoln + target: StLouis + capacity: 45000.0 + cost: 6 + risk_groups: + - RG_Lincoln_StLouis + attrs: + circuit: A + - source: Lincoln + target: StLouis + capacity: 45000.0 + cost: 6 + risk_groups: + - RG_Lincoln_StLouis + attrs: + circuit: B + - source: StLouis + target: Chicago + capacity: 45000.0 + cost: 5 + risk_groups: + - RG_StLouis_Chicago + attrs: + circuit: A + - source: StLouis + target: Chicago + capacity: 45000.0 + cost: 5 + risk_groups: + - RG_StLouis_Chicago + attrs: + circuit: B + - source: Cleveland + target: StLouis + capacity: 45000.0 + cost: 7 + risk_groups: + - RG_Cleveland_StLouis + attrs: + circuit: A + - source: Cleveland + target: StLouis + capacity: 45000.0 + cost: 7 + risk_groups: + - RG_Cleveland_StLouis + attrs: + circuit: B + - source: Denver + target: SaltLakeCity + capacity: 45000.0 + cost: 9 + risk_groups: + - RG_Denver_SaltLakeCity + attrs: + circuit: A + - source: Denver + target: SaltLakeCity + capacity: 45000.0 + cost: 9 + risk_groups: + - RG_Denver_SaltLakeCity + attrs: + circuit: B + - source: Chicago + target: AnnArbor + capacity: 45000.0 + cost: 5 + risk_groups: + - RG_Chicago_AnnArbor + attrs: + circuit: A + - source: Chicago + target: AnnArbor + capacity: 45000.0 + cost: 5 + risk_groups: + - RG_Chicago_AnnArbor + attrs: + circuit: B + - source: AnnArbor + target: Cleveland + capacity: 45000.0 + cost: 5 + risk_groups: + - RG_AnnArbor_Cleveland + attrs: + circuit: A + - source: AnnArbor + target: Cleveland + capacity: 45000.0 + cost: 5 + risk_groups: + - RG_AnnArbor_Cleveland + attrs: + circuit: B + - source: Hartford + target: NewYork + capacity: 45000.0 + cost: 5 + risk_groups: + - RG_Hartford_NewYork + attrs: + circuit: A + - source: Hartford + target: NewYork + capacity: 45000.0 + cost: 5 + risk_groups: + - RG_Hartford_NewYork + attrs: + circuit: B + - source: Hartford + target: WashingtonDC + capacity: 45000.0 + cost: 5 + risk_groups: + - RG_Hartford_WashingtonDC + attrs: + circuit: A + - source: Hartford + target: WashingtonDC + capacity: 45000.0 + cost: 5 + risk_groups: + - RG_Hartford_WashingtonDC + attrs: + circuit: B + - source: Princeton + target: Ithaca + capacity: 45000.0 + cost: 5 + attrs: + circuit: A + - source: Princeton + target: WashingtonDC + capacity: 45000.0 + cost: 4 + attrs: + circuit: A + - source: CollegePark + target: WashingtonDC + capacity: 45000.0 + cost: 3 + attrs: + circuit: A + - source: CollegePark + target: NewYork + capacity: 45000.0 + cost: 6 + attrs: + circuit: 
A + - source: Cambridge + target: NewYork + capacity: 45000.0 + cost: 6 + attrs: + circuit: A + - source: Argonne + target: Chicago + capacity: 45000.0 + cost: 4 + attrs: + circuit: A + - source: SanDiego + target: LosAngeles + capacity: 45000.0 + cost: 6 + attrs: + circuit: A + - source: Boulder + target: Denver + capacity: 45000.0 + cost: 4 + attrs: + circuit: A + - source: Pittsburgh + target: Cleveland + capacity: 45000.0 + cost: 4 + attrs: + circuit: A + - source: UrbanaChampaign + target: Chicago + capacity: 45000.0 + cost: 4 + attrs: + circuit: A + - source: MoffettField + target: PaloAlto + capacity: 45000.0 + cost: 6 + attrs: + circuit: A risk_groups: - - { - name: RG_AnnArbor_Cleveland, - attrs: { description: "Great-Lakes loop DS-3 pair" }, - } - - { - name: RG_Atlanta_Houston, - attrs: { description: "Southern arc DS-3 pair" }, - } - - { - name: RG_Cleveland_Chicago, - attrs: { description: "Northern arc DS-3 pair" }, - } - - { - name: RG_Cleveland_NewYork, - attrs: { description: "Northern arc DS-3 pair" }, - } - - { - name: RG_Cleveland_StLouis, - attrs: { description: "Mid-west shortcut DS-3 pair" }, - } - - { - name: RG_Chicago_AnnArbor, - attrs: { description: "Great-Lakes loop DS-3 pair" }, - } - - { - name: RG_Chicago_PaloAlto, - attrs: { description: "Trans-continental northern DS-3 pair" }, - } - - { name: RG_Denver_Lincoln, attrs: { description: "Rockies DS-3 pair" } } - - { - name: RG_Denver_SaltLakeCity, - attrs: { description: "Rockies DS-3 pair" }, - } - - { - name: RG_Greensboro_Atlanta, - attrs: { description: "Southern arc DS-3 pair" }, - } - - { - name: RG_Hartford_NewYork, - attrs: { description: "Hartford hub DS-3 pair" }, - } - - { - name: RG_Hartford_WashingtonDC, - attrs: { description: "Hartford hub DS-3 pair" }, - } - - { - name: RG_Houston_LosAngeles, - attrs: { description: "Southern arc DS-3 pair" }, - } - - { name: RG_Lincoln_StLouis, attrs: { description: "Rockies DS-3 pair" } } - - { - name: RG_LosAngeles_PaloAlto, - attrs: { description: "California DS-3 pair" }, - } - - { - name: RG_NewYork_WashingtonDC, - attrs: { description: "Southern arc DS-3 pair" }, - } - - { - name: RG_PaloAlto_Seattle, - attrs: { description: "Pacific-Northwest DS-3 pair" }, - } - - { - name: RG_Seattle_SaltLakeCity, - attrs: { description: "Pacific-Northwest DS-3 pair" }, - } - - { - name: RG_SaltLakeCity_Denver, - attrs: { description: "Rockies DS-3 pair" }, - } - - { name: RG_StLouis_Chicago, attrs: { description: "Rockies DS-3 pair" } } - - { - name: RG_WashingtonDC_Greensboro, - attrs: { description: "Southern arc DS-3 pair" }, - } - -############################################################################### -# Failure policies -############################################################################### -failure_policy_set: + - name: RG_AnnArbor_Cleveland + attrs: + description: Great-Lakes loop DS-3 pair + - name: RG_Atlanta_Houston + attrs: + description: Southern arc DS-3 pair + - name: RG_Cleveland_Chicago + attrs: + description: Northern arc DS-3 pair + - name: RG_Cleveland_NewYork + attrs: + description: Northern arc DS-3 pair + - name: RG_Cleveland_StLouis + attrs: + description: Mid-west shortcut DS-3 pair + - name: RG_Chicago_AnnArbor + attrs: + description: Great-Lakes loop DS-3 pair + - name: RG_Chicago_PaloAlto + attrs: + description: Trans-continental northern DS-3 pair + - name: RG_Denver_Lincoln + attrs: + description: Rockies DS-3 pair + - name: RG_Denver_SaltLakeCity + attrs: + description: Rockies DS-3 pair + - name: RG_Greensboro_Atlanta 
+ attrs: + description: Southern arc DS-3 pair + - name: RG_Hartford_NewYork + attrs: + description: Hartford hub DS-3 pair + - name: RG_Hartford_WashingtonDC + attrs: + description: Hartford hub DS-3 pair + - name: RG_Houston_LosAngeles + attrs: + description: Southern arc DS-3 pair + - name: RG_Lincoln_StLouis + attrs: + description: Rockies DS-3 pair + - name: RG_LosAngeles_PaloAlto + attrs: + description: California DS-3 pair + - name: RG_NewYork_WashingtonDC + attrs: + description: Southern arc DS-3 pair + - name: RG_PaloAlto_Seattle + attrs: + description: Pacific-Northwest DS-3 pair + - name: RG_Seattle_SaltLakeCity + attrs: + description: Pacific-Northwest DS-3 pair + - name: RG_SaltLakeCity_Denver + attrs: + description: Rockies DS-3 pair + - name: RG_StLouis_Chicago + attrs: + description: Rockies DS-3 pair + - name: RG_WashingtonDC_Greensboro + attrs: + description: Southern arc DS-3 pair +failures: availability_1992: attrs: - description: > - Approximates 1992 backbone reliability: each physical DS-3 has - ~99.9 % monthly availability (p=0.001 failure), and each CNSS or - ENSS router has ~99.95 % availability (p=0.0005 failure). - fail_risk_groups: false - fail_risk_group_children: false + description: | + Approximates 1992 backbone reliability: each physical DS-3 has ~99.9 % monthly availability (p=0.001 failure), and each CNSS or ENSS router has ~99.95 % availability (p=0.0005 failure). + expand_groups: false + expand_children: false modes: - weight: 1.0 rules: - # link reliability - random independent failures - - entity_scope: link - rule_type: random - probability: 0.001 # 0.1 % chance a given circuit is down - # node reliability - random independent router failures - - entity_scope: node - rule_type: random - probability: 0.0005 # 0.05 % chance a given node is down - + - scope: link + mode: random + probability: 0.001 + - scope: node + mode: random + probability: 0.0005 single_link_failure: attrs: description: Fails exactly one random link to test network resilience modes: - weight: 1.0 rules: - - entity_scope: link - rule_type: choice + - scope: link + mode: choice count: 1 - -############################################################################### -# Workflow -############################################################################### workflow: - # MaxFlow capacity matrix between all node pairs - - step_type: MaxFlow + - type: MaxFlow name: node_to_node_capacity_matrix_1 - source: "^(.+)$" - sink: "^(.+)$" + source: ^(.+)$ + target: ^(.+)$ mode: pairwise failure_policy: single_link_failure iterations: 1000 @@ -721,11 +588,10 @@ workflow: store_failure_patterns: true include_flow_details: true include_min_cut: true - - - step_type: MaxFlow + - type: MaxFlow name: node_to_node_capacity_matrix_2 - source: "^(.+)$" - sink: "^(.+)$" + source: ^(.+)$ + target: ^(.+)$ mode: pairwise failure_policy: availability_1992 iterations: 1000 diff --git a/scenarios/readme_example.yml b/scenarios/readme_example.yml new file mode 100644 index 0000000..c23c336 --- /dev/null +++ b/scenarios/readme_example.yml @@ -0,0 +1,69 @@ +seed: 42 + +# Define reusable topology templates +blueprints: + Clos_Fabric: + nodes: + spine: { count: 2, template: "spine{n}" } + leaf: { count: 4, template: "leaf{n}" } + links: + - source: /leaf + target: /spine + pattern: mesh + capacity: 100 + cost: 1 + - source: /spine + target: /leaf + pattern: mesh + capacity: 100 + cost: 1 + +# Instantiate network from templates +network: + nodes: + site1: { blueprint: Clos_Fabric } + site2: { blueprint: 
Clos_Fabric } + links: + - source: { path: site1/spine } + target: { path: site2/spine } + pattern: one_to_one + capacity: 50 + cost: 10 + +# Define failure policy for Monte Carlo analysis +failures: + random_link: + modes: + - weight: 1.0 + rules: + - scope: link + mode: choice + count: 1 + +# Define traffic demands +demands: + global_traffic: + - source: ^site1/leaf/ + target: ^site2/leaf/ + volume: 100.0 + mode: combine + flow_policy: SHORTEST_PATHS_ECMP + +# Analysis workflow: find max capacity, then test under failures +workflow: + - type: NetworkStats + name: stats + - type: MaxFlow + name: site_capacity + source: ^site1/leaf/ + target: ^site2/leaf/ + mode: combine + - type: MaximumSupportedDemand + name: max_demand + demand_set: global_traffic + - type: TrafficMatrixPlacement + name: placement_at_max + demand_set: global_traffic + alpha_from_step: max_demand # Use alpha_star from MSD step + failure_policy: random_link + iterations: 100 diff --git a/scenarios/square_mesh.yaml b/scenarios/square_mesh.yaml index d7368bd..c7c32f3 100644 --- a/scenarios/square_mesh.yaml +++ b/scenarios/square_mesh.yaml @@ -1,72 +1,55 @@ -# Toy scenario: 4-node full mesh topology with single-link failure analysis -# Each node connected to every other node with identical link parameters - seed: 42 - network: nodes: N1: {} N2: {} N3: {} N4: {} - links: - # Full mesh connectivity: 6 bidirectional links (connects every node to every other node) - # Links are bidirectional by default in NetGraph - source: N1 target: N2 - link_params: - capacity: 2.0 - cost: 1.0 + capacity: 2.0 + cost: 1.0 - source: N1 target: N3 - link_params: - capacity: 1.0 - cost: 1.0 + capacity: 1.0 + cost: 1.0 - source: N1 target: N4 - link_params: - capacity: 2.0 - cost: 1.0 + capacity: 2.0 + cost: 1.0 - source: N2 target: N3 - link_params: - capacity: 2.0 - cost: 1.0 + capacity: 2.0 + cost: 1.0 - source: N2 target: N4 - link_params: - capacity: 1.0 - cost: 1.0 + capacity: 1.0 + cost: 1.0 - source: N3 target: N4 - link_params: - capacity: 2.0 - cost: 1.0 - -failure_policy_set: + capacity: 2.0 + cost: 1.0 +failures: single_link_failure: modes: - weight: 1.0 rules: - - entity_scope: link - rule_type: choice + - scope: link + mode: choice count: 1 - -traffic_matrix_set: +demands: baseline_traffic_matrix: - - source: "^N([1-4])$" - sink: "^N([1-4])$" - demand: 12.0 - mode: "pairwise" + - source: ^N([1-4])$ + target: ^N([1-4])$ + volume: 12.0 + mode: pairwise attrs: euclidean_km: 10 - workflow: - # 1) Maximum Supported Demand search - - step_type: MaximumSupportedDemand + - type: MaximumSupportedDemand name: msd_baseline - matrix_name: baseline_traffic_matrix + demand_set: baseline_traffic_matrix acceptance_rule: hard alpha_start: 1.0 growth_factor: 2.0 @@ -77,11 +60,9 @@ workflow: max_bisect_iters: 32 seeds_per_alpha: 1 placement_rounds: 2 - - # 2) Traffic matrix placement using MSD alpha - - step_type: TrafficMatrixPlacement + - type: TrafficMatrixPlacement name: tm_placement - matrix_name: baseline_traffic_matrix + demand_set: baseline_traffic_matrix failure_policy: single_link_failure iterations: 1000 parallelism: 8 @@ -91,12 +72,10 @@ workflow: include_flow_details: true alpha_from_step: msd_baseline alpha_from_field: data.alpha_star - - # 3) MaxFlow capacity matrix between all node pairs - - step_type: MaxFlow + - type: MaxFlow name: node_to_node_capacity_matrix - source: "^(N[1-4])$" - sink: "^(N[1-4])$" + source: ^(N[1-4])$ + target: ^(N[1-4])$ mode: pairwise failure_policy: single_link_failure iterations: 1000 diff --git 
a/tests/analysis/test_demand.py b/tests/analysis/test_demand.py index 1c6a653..4f80c20 100644 --- a/tests/analysis/test_demand.py +++ b/tests/analysis/test_demand.py @@ -27,14 +27,14 @@ def test_explicit_id_preserved(self) -> None: td = TrafficDemand( id="my-stable-id", source="A", - sink="B", - demand=100.0, + target="B", + volume=100.0, ) assert td.id == "my-stable-id" def test_auto_generated_id_when_none(self) -> None: """TrafficDemand without explicit ID auto-generates one.""" - td = TrafficDemand(source="A", sink="B", demand=100.0) + td = TrafficDemand(source="A", target="B", volume=100.0) assert td.id is not None assert "|" in td.id # Format: source|sink|uuid @@ -42,8 +42,8 @@ def test_id_round_trip_through_dict(self) -> None: """TrafficDemand ID survives dict serialization round-trip.""" original = TrafficDemand( source="A", - sink="B", - demand=100.0, + target="B", + volume=100.0, mode="combine", priority=1, ) @@ -53,8 +53,8 @@ def test_id_round_trip_through_dict(self) -> None: config = { "id": original.id, "source": original.source, - "sink": original.sink, - "demand": original.demand, + "target": original.target, + "volume": original.volume, "mode": original.mode, "priority": original.priority, } @@ -63,8 +63,8 @@ def test_id_round_trip_through_dict(self) -> None: reconstructed = TrafficDemand( id=config.get("id"), source=config["source"], - sink=config["sink"], - demand=config["demand"], + target=config["target"], + volume=config["volume"], mode=config.get("mode", "pairwise"), priority=config.get("priority", 0), ) @@ -75,19 +75,19 @@ def test_id_mismatch_without_explicit_id(self) -> None: """Two TrafficDemands from same config get different IDs if id not passed.""" config = { "source": "A", - "sink": "B", - "demand": 100.0, + "target": "B", + "volume": 100.0, } td1 = TrafficDemand( source=config["source"], - sink=config["sink"], - demand=config["demand"], + target=config["target"], + volume=config["volume"], ) td2 = TrafficDemand( source=config["source"], - sink=config["sink"], - demand=config["demand"], + target=config["target"], + volume=config["volume"], ) # Without explicit ID, each gets a different auto-generated ID @@ -99,7 +99,7 @@ class TestExpandDemandsPairwise: def test_pairwise_single_pair(self, simple_network: Network) -> None: """Pairwise mode with single source-sink creates one demand.""" - td = TrafficDemand(source="A", sink="D", demand=100.0, mode="pairwise") + td = TrafficDemand(source="A", target="D", volume=100.0, mode="pairwise") expansion = expand_demands(simple_network, [td]) assert len(expansion.demands) == 1 @@ -114,8 +114,8 @@ def test_pairwise_multiple_sources(self, simple_network: Network) -> None: """Pairwise mode with regex creates demand per (src, dst) pair.""" td = TrafficDemand( source="[AB]", # A and B - sink="[CD]", # C and D - demand=100.0, + target="[CD]", # C and D + volume=100.0, mode="pairwise", ) expansion = expand_demands(simple_network, [td]) @@ -132,8 +132,8 @@ def test_pairwise_no_self_loops(self, simple_network: Network) -> None: """Pairwise mode excludes self-loops.""" td = TrafficDemand( source="[AB]", - sink="[AB]", # Same as sources - demand=100.0, + target="[AB]", # Same as sources + volume=100.0, mode="pairwise", ) expansion = expand_demands(simple_network, [td]) @@ -151,8 +151,8 @@ def test_combine_creates_pseudo_nodes(self, simple_network: Network) -> None: """Combine mode creates pseudo source and sink nodes.""" td = TrafficDemand( source="[AB]", - sink="[CD]", - demand=100.0, + target="[CD]", + volume=100.0, mode="combine", 
) expansion = expand_demands(simple_network, [td]) @@ -173,8 +173,8 @@ def test_combine_pseudo_node_names_use_id(self, simple_network: Network) -> None td = TrafficDemand( id="stable-id-123", source="A", - sink="D", - demand=100.0, + target="D", + volume=100.0, mode="combine", ) expansion = expand_demands(simple_network, [td]) @@ -188,8 +188,8 @@ def test_combine_augmentations_structure(self, simple_network: Network) -> None: td = TrafficDemand( id="test-id", source="[AB]", - sink="[CD]", - demand=100.0, + target="[CD]", + volume=100.0, mode="combine", ) expansion = expand_demands(simple_network, [td]) @@ -214,15 +214,15 @@ def test_same_id_produces_same_pseudo_nodes(self, simple_network: Network) -> No td1 = TrafficDemand( id="shared-id", source="A", - sink="D", - demand=100.0, + target="D", + volume=100.0, mode="combine", ) td2 = TrafficDemand( id="shared-id", source="A", - sink="D", - demand=200.0, # Different demand + target="D", + volume=200.0, # Different demand mode="combine", ) @@ -240,15 +240,15 @@ def test_different_ids_produce_different_pseudo_nodes( td1 = TrafficDemand( id="id-alpha", source="A", - sink="D", - demand=100.0, + target="D", + volume=100.0, mode="combine", ) td2 = TrafficDemand( id="id-beta", source="A", - sink="D", - demand=100.0, + target="D", + volume=100.0, mode="combine", ) @@ -272,8 +272,8 @@ def test_no_matching_nodes_raises(self, simple_network: Network) -> None: """Demand with no matching nodes raises ValueError.""" td = TrafficDemand( source="nonexistent", - sink="also_nonexistent", - demand=100.0, + target="also_nonexistent", + volume=100.0, ) with pytest.raises(ValueError, match="No demands could be expanded"): expand_demands(simple_network, [td]) @@ -282,14 +282,14 @@ def test_multiple_demands_mixed_modes(self, simple_network: Network) -> None: """Multiple demands with different modes expand correctly.""" td_pairwise = TrafficDemand( source="A", - sink="B", - demand=50.0, + target="B", + volume=50.0, mode="pairwise", ) td_combine = TrafficDemand( source="[CD]", - sink="[AB]", - demand=100.0, + target="[AB]", + volume=100.0, mode="combine", ) @@ -333,8 +333,8 @@ def test_group_by_selector(self, network_with_attrs: Network) -> None: """Dict selector with group_by groups nodes by attribute.""" td = TrafficDemand( source={"group_by": "dc"}, # Group by datacenter - sink={"group_by": "dc"}, - demand=100.0, + target={"group_by": "dc"}, + volume=100.0, mode="pairwise", ) expansion = expand_demands(network_with_attrs, [td]) @@ -355,16 +355,16 @@ def test_match_selector_filters_nodes(self, network_with_attrs: Network) -> None source={ "path": ".*", "match": { - "conditions": [{"attr": "role", "operator": "==", "value": "leaf"}] + "conditions": [{"attr": "role", "op": "==", "value": "leaf"}] }, }, - sink={ + target={ "path": ".*", "match": { - "conditions": [{"attr": "role", "operator": "==", "value": "spine"}] + "conditions": [{"attr": "role", "op": "==", "value": "spine"}] }, }, - demand=100.0, + volume=100.0, mode="pairwise", ) expansion = expand_demands(network_with_attrs, [td]) @@ -378,16 +378,16 @@ def test_combined_path_and_match(self, network_with_attrs: Network) -> None: source={ "path": "^dc1_.*", # Only dc1 "match": { - "conditions": [{"attr": "role", "operator": "==", "value": "leaf"}] + "conditions": [{"attr": "role", "op": "==", "value": "leaf"}] }, }, - sink={ + target={ "path": "^dc2_.*", # Only dc2 "match": { - "conditions": [{"attr": "role", "operator": "==", "value": "spine"}] + "conditions": [{"attr": "role", "op": "==", "value": "spine"}] }, 
}, - demand=100.0, + volume=100.0, mode="pairwise", ) expansion = expand_demands(network_with_attrs, [td]) @@ -396,108 +396,18 @@ def test_combined_path_and_match(self, network_with_attrs: Network) -> None: assert len(expansion.demands) == 4 -class TestVariableExpansion: - """Test expand_vars in demands.""" - - @pytest.fixture - def multi_dc_network(self) -> Network: - """Create a network with multiple datacenters.""" - network = Network() - for dc in ["dc1", "dc2", "dc3"]: - for i in [1, 2]: - network.add_node(Node(f"{dc}_server_{i}")) - # Full mesh between datacenters - for src_dc in ["dc1", "dc2", "dc3"]: - for dst_dc in ["dc1", "dc2", "dc3"]: - if src_dc != dst_dc: - for i in [1, 2]: - for j in [1, 2]: - network.add_link( - Link( - f"{src_dc}_server_{i}", - f"{dst_dc}_server_{j}", - capacity=100.0, - ) - ) - return network - - def test_expand_vars_cartesian(self, multi_dc_network: Network) -> None: - """Variable expansion with cartesian mode creates all combinations.""" - td = TrafficDemand( - source="^${src_dc}_server_.*", - sink="^${dst_dc}_server_.*", - demand=100.0, - mode="combine", - expand_vars={ - "src_dc": ["dc1", "dc2"], - "dst_dc": ["dc2", "dc3"], - }, - expansion_mode="cartesian", - ) - expansion = expand_demands(multi_dc_network, [td]) - - # Cartesian: 2 src_dc x 2 dst_dc = 4 combinations - # (dc1->dc2, dc1->dc3, dc2->dc2-skip self, dc2->dc3) - # Actually dc2->dc2 is not a self-pair at demand level - assert len(expansion.demands) == 4 - - def test_expand_vars_zip(self, multi_dc_network: Network) -> None: - """Variable expansion with zip mode pairs variables by index.""" - td = TrafficDemand( - source="^${src_dc}_server_.*", - sink="^${dst_dc}_server_.*", - demand=100.0, - mode="combine", - expand_vars={ - "src_dc": ["dc1", "dc2"], - "dst_dc": ["dc2", "dc3"], - }, - expansion_mode="zip", - ) - expansion = expand_demands(multi_dc_network, [td]) - - # Zip: (dc1, dc2) and (dc2, dc3) = 2 combinations - assert len(expansion.demands) == 2 - - def test_expand_vars_with_dict_selector(self, multi_dc_network: Network) -> None: - """Variable expansion works with dict selectors.""" - # Add dc attribute to nodes - for node in multi_dc_network.nodes.values(): - dc = node.name.split("_")[0] - node.attrs["dc"] = dc - - td = TrafficDemand( - source={"path": "^${dc}_server_.*"}, - sink={"path": "^${dc}_server_.*"}, - demand=100.0, - mode="pairwise", - expand_vars={"dc": ["dc1", "dc2"]}, - ) - expansion = expand_demands(multi_dc_network, [td]) - - # For each dc: 2 servers, 2 pairs (1->2 and 2->1) - # 2 dcs x 2 pairs = 4 total - assert len(expansion.demands) == 4 - - class TestTrafficDemandFieldPreservation: - """Test that TrafficDemand fields are preserved in workflow contexts. - - Verifies that group_mode, expand_vars, and expansion_mode fields - are correctly preserved when TrafficDemand objects are copied/serialized. 
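The deleted TestVariableExpansion class above reflects a functional change rather than a rename: `expand_vars` and `expansion_mode` are removed from TrafficDemand, so templated demands that previously expanded over variables such as `${dc}` now have to be written out as explicit entries. A minimal sketch in the scenario DSL, with node-name patterns modeled on the deleted test fixtures (hypothetical, for illustration only):

```yaml
# Previously a single templated demand could expand over ${src_dc}/${dst_dc};
# each source/target pair is now its own explicit entry.
demands:
  example_matrix:
    - source: ^dc1_server_.*
      target: ^dc2_server_.*
      volume: 100.0
      mode: combine
    - source: ^dc2_server_.*
      target: ^dc3_server_.*
      volume: 100.0
      mode: combine
```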
- """ + """Test that TrafficDemand fields are preserved in workflow contexts.""" def test_all_fields_preserved_in_dict_round_trip(self) -> None: - """All new fields survive dict serialization.""" + """Core fields survive dict serialization.""" original = TrafficDemand( id="test-id", source="^dc1/.*", - sink="^dc2/.*", - demand=100.0, + target="^dc2/.*", + volume=100.0, mode="combine", group_mode="per_group", - expand_vars={"dc": ["dc1", "dc2"]}, - expansion_mode="zip", priority=5, ) @@ -505,12 +415,10 @@ def test_all_fields_preserved_in_dict_round_trip(self) -> None: serialized = { "id": original.id, "source": original.source, - "sink": original.sink, - "demand": original.demand, + "target": original.target, + "volume": original.volume, "mode": original.mode, "group_mode": original.group_mode, - "expand_vars": original.expand_vars, - "expansion_mode": original.expansion_mode, "priority": original.priority, } @@ -518,33 +426,27 @@ def test_all_fields_preserved_in_dict_round_trip(self) -> None: reconstructed = TrafficDemand( id=serialized.get("id") or "", source=serialized["source"], - sink=serialized["sink"], - demand=float(serialized["demand"]), + target=serialized["target"], + volume=float(serialized["volume"]), mode=str(serialized.get("mode", "pairwise")), group_mode=str(serialized.get("group_mode", "flatten")), - expand_vars=serialized.get("expand_vars") or {}, - expansion_mode=str(serialized.get("expansion_mode", "cartesian")), priority=int(serialized.get("priority", 0)), ) assert reconstructed.id == original.id assert reconstructed.source == original.source - assert reconstructed.sink == original.sink - assert reconstructed.demand == original.demand + assert reconstructed.target == original.target + assert reconstructed.volume == original.volume assert reconstructed.mode == original.mode assert reconstructed.group_mode == original.group_mode - assert reconstructed.expand_vars == original.expand_vars - assert reconstructed.expansion_mode == original.expansion_mode assert reconstructed.priority == original.priority def test_default_values_for_new_fields(self) -> None: """New fields have sensible defaults when not specified.""" td = TrafficDemand( source="^A$", - sink="^B$", - demand=100.0, + target="^B$", + volume=100.0, ) assert td.group_mode == "flatten" - assert td.expand_vars == {} - assert td.expansion_mode == "cartesian" diff --git a/tests/analysis/test_failure_manager.py b/tests/analysis/test_failure_manager.py index 54542d2..707a09c 100644 --- a/tests/analysis/test_failure_manager.py +++ b/tests/analysis/test_failure_manager.py @@ -10,8 +10,8 @@ import pytest from ngraph.analysis.failure_manager import FailureManager +from ngraph.dsl.selectors.schema import Condition from ngraph.model.failure.policy import ( - FailureCondition, FailureMode, FailurePolicy, FailureRule, @@ -36,7 +36,7 @@ def simple_network() -> Network: @pytest.fixture def failure_policy() -> FailurePolicy: """Create a simple failure policy for testing.""" - rule = FailureRule(entity_scope="node", rule_type="choice", count=1) + rule = FailureRule(scope="node", mode="choice", count=1) return FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])]) @@ -184,10 +184,10 @@ def test_node_matching_on_disabled_attribute( simple_network.nodes["node1"].disabled = True rule = FailureRule( - entity_scope="node", - conditions=[FailureCondition(attr="disabled", operator="==", value=True)], + scope="node", + conditions=[Condition(attr="disabled", op="==", value=True)], logic="and", - rule_type="all", + mode="all", ) policy = 
FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])]) @@ -209,10 +209,10 @@ def test_link_matching_on_capacity_attribute( ) -> None: """Test link matching on capacity attribute.""" rule = FailureRule( - entity_scope="link", - conditions=[FailureCondition(attr="capacity", operator=">", value=150.0)], + scope="link", + conditions=[Condition(attr="capacity", op=">", value=150.0)], logic="and", - rule_type="all", + mode="all", ) policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])]) @@ -285,7 +285,7 @@ def test_run_max_flow_monte_carlo_delegates( result = failure_manager.run_max_flow_monte_carlo( source="datacenter.*", - sink="edge.*", + target="edge.*", mode="combine", iterations=2, parallelism=1, @@ -323,7 +323,7 @@ def test_flow_placement_string_conversion_max_flow( failure_manager.run_max_flow_monte_carlo( source="src.*", - sink="dst.*", + target="dst.*", flow_placement="EQUAL_BALANCED", iterations=1, ) @@ -338,7 +338,7 @@ def test_invalid_flow_placement_string_raises_error( with pytest.raises(ValueError) as exc_info: failure_manager.run_max_flow_monte_carlo( source="src.*", - sink="dst.*", + target="dst.*", flow_placement="INVALID_OPTION", iterations=1, ) @@ -358,7 +358,7 @@ def test_case_insensitive_flow_placement_conversion( failure_manager.run_max_flow_monte_carlo( source="src.*", - sink="dst.*", + target="dst.*", flow_placement="proportional", # lowercase iterations=1, ) diff --git a/tests/analysis/test_failure_manager_integration.py b/tests/analysis/test_failure_manager_integration.py index 895fc9c..146aebe 100644 --- a/tests/analysis/test_failure_manager_integration.py +++ b/tests/analysis/test_failure_manager_integration.py @@ -35,8 +35,8 @@ def failure_policy_set(self): # Single link failure policy rule = FailureRule( - entity_scope="link", - rule_type="choice", + scope="link", + mode="choice", count=1, ) from ngraph.model.failure.policy import FailureMode @@ -120,7 +120,7 @@ def test_run_monte_carlo_analysis(self, simple_network, failure_policy_set): seed=42, # Pass analysis parameters directly as kwargs source="A", - sink="C", + target="C", mode="combine", ) @@ -163,7 +163,7 @@ def test_analysis_with_parallel_execution(self, simple_network, failure_policy_s parallelism=2, # Multiple workers seed=42, source="A", - sink="C", + target="C", mode="combine", ) @@ -186,7 +186,7 @@ def test_baseline_iteration_handling(self, simple_network, failure_policy_set): parallelism=1, seed=42, source="A", - sink="C", + target="C", mode="combine", ) @@ -214,7 +214,7 @@ def test_failure_trace_fields_present(self, simple_network, failure_policy_set): store_failure_patterns=True, seed=42, source="A", - sink="C", + target="C", mode="combine", ) @@ -240,8 +240,8 @@ def test_failure_trace_fields_present(self, simple_network, failure_policy_set): if trace["selections"]: sel = trace["selections"][0] assert "rule_index" in sel - assert "entity_scope" in sel - assert "rule_type" in sel + assert "scope" in sel + assert "mode" in sel assert "matched_count" in sel assert "selected_ids" in sel @@ -263,7 +263,7 @@ def test_failure_trace_not_present_when_disabled( store_failure_patterns=False, # Disabled seed=42, source="A", - sink="C", + target="C", mode="combine", ) @@ -283,7 +283,7 @@ def test_baseline_has_no_trace_fields(self, simple_network, failure_policy_set): store_failure_patterns=True, seed=42, source="A", - sink="C", + target="C", mode="combine", ) @@ -305,7 +305,7 @@ def run(): store_failure_patterns=True, seed=42, source="A", - sink="C", + target="C", mode="combine", ) @@ -351,8 
+351,8 @@ def test_capacity_envelope_analysis_integration(self): # Create failure policy policy_set = FailurePolicySet() rule = FailureRule( - entity_scope="link", - rule_type="choice", + scope="link", + mode="choice", count=2, ) from ngraph.model.failure.policy import FailureMode @@ -369,7 +369,7 @@ def test_capacity_envelope_analysis_integration(self): parallelism=1, seed=123, source="spine.*", - sink="leaf.*", + target="leaf.*", mode="pairwise", ) diff --git a/tests/analysis/test_functions.py b/tests/analysis/test_functions.py index 90721d7..0f2ce33 100644 --- a/tests/analysis/test_functions.py +++ b/tests/analysis/test_functions.py @@ -38,7 +38,7 @@ def test_max_flow_analysis_basic(self, simple_network: Network) -> None: excluded_nodes=set(), excluded_links=set(), source="datacenter.*", - sink="edge.*", + target="edge.*", mode="combine", ) @@ -59,7 +59,7 @@ def test_max_flow_analysis_with_summary(self, simple_network: Network) -> None: excluded_nodes=set(), excluded_links=set(), source="datacenter.*", - sink="edge.*", + target="edge.*", include_flow_details=True, include_min_cut=True, ) @@ -85,11 +85,10 @@ def test_max_flow_analysis_with_optional_params( excluded_nodes=set(), excluded_links=set(), source="datacenter.*", - sink="edge.*", + target="edge.*", mode="pairwise", shortest_path=True, flow_placement=FlowPlacement.EQUAL_BALANCED, - extra_param="ignored", ) assert isinstance(result, FlowIterationResult) @@ -100,6 +99,20 @@ def test_max_flow_analysis_with_optional_params( assert flow.source.startswith("datacenter") assert flow.destination.startswith("edge") + def test_max_flow_analysis_rejects_unknown_params( + self, simple_network: Network + ) -> None: + """Test that unknown parameters raise TypeError.""" + with pytest.raises(TypeError, match="unexpected keyword argument"): + max_flow_analysis( + network=simple_network, + excluded_nodes=set(), + excluded_links=set(), + source="datacenter.*", + target="edge.*", + unknown_param="should_fail", + ) + def test_max_flow_analysis_empty_result(self, simple_network: Network) -> None: """Test max_flow_analysis with no matching nodes raises an error.""" # In NetGraph-Core, non-matching nodes raise ValueError (better UX than silent empty) @@ -109,28 +122,14 @@ def test_max_flow_analysis_empty_result(self, simple_network: Network) -> None: excluded_nodes=set(), excluded_links=set(), source="nonexistent.*", - sink="also_nonexistent.*", + target="also_nonexistent.*", ) class TestDemandPlacementAnalysis: """Test demand_placement_analysis function.""" - @pytest.fixture - def diamond_network(self) -> Network: - """Create a diamond network for testing demand placement.""" - network = Network() - # Add nodes: A -> B,C -> D (diamond shape) - for node in ["A", "B", "C", "D"]: - network.add_node(Node(node)) - - # Add links with limited capacity - network.add_link(Link("A", "B", capacity=60.0, cost=1.0)) - network.add_link(Link("A", "C", capacity=60.0, cost=1.0)) - network.add_link(Link("B", "D", capacity=60.0, cost=1.0)) - network.add_link(Link("C", "D", capacity=60.0, cost=1.0)) - - return network + # Uses diamond_network fixture from conftest.py def test_demand_placement_analysis_basic(self, diamond_network: Network) -> None: """Test basic demand_placement_analysis functionality.""" @@ -138,8 +137,8 @@ def test_demand_placement_analysis_basic(self, diamond_network: Network) -> None demands_config = [ { "source": "A", - "sink": "D", - "demand": 50.0, + "target": "D", + "volume": 50.0, "mode": "pairwise", "priority": 0, }, @@ -178,8 +177,8 @@ def 
test_demand_placement_analysis_zero_total_demand( demands_config = [ { "source": "A", - "sink": "B", - "demand": 0.0, + "target": "B", + "volume": 0.0, } ] @@ -203,17 +202,7 @@ def test_demand_placement_analysis_zero_total_demand( class TestDemandPlacementWithContextCaching: """Test demand_placement_analysis with pre-built context caching.""" - @pytest.fixture - def diamond_network(self) -> Network: - """Create a diamond network for testing.""" - network = Network() - for node in ["A", "B", "C", "D"]: - network.add_node(Node(node)) - network.add_link(Link("A", "B", capacity=60.0, cost=1.0)) - network.add_link(Link("A", "C", capacity=60.0, cost=1.0)) - network.add_link(Link("B", "D", capacity=60.0, cost=1.0)) - network.add_link(Link("C", "D", capacity=60.0, cost=1.0)) - return network + # Uses diamond_network fixture from conftest.py def test_context_caching_pairwise_mode(self, diamond_network: Network) -> None: """Context caching works with pairwise mode.""" @@ -223,8 +212,8 @@ def test_context_caching_pairwise_mode(self, diamond_network: Network) -> None: { "id": "stable-pairwise-id", "source": "A", - "sink": "D", - "demand": 50.0, + "target": "D", + "volume": 50.0, "mode": "pairwise", }, ] @@ -252,8 +241,8 @@ def test_context_caching_combine_mode(self, diamond_network: Network) -> None: { "id": "stable-combine-id", "source": "[AB]", - "sink": "[CD]", - "demand": 50.0, + "target": "[CD]", + "volume": 50.0, "mode": "combine", }, ] @@ -283,8 +272,8 @@ def test_context_caching_combine_multiple_iterations( { "id": "reusable-id", "source": "[AB]", - "sink": "[CD]", - "demand": 50.0, + "target": "[CD]", + "volume": 50.0, "mode": "combine", }, ] @@ -310,8 +299,8 @@ def test_context_caching_without_id_raises(self, diamond_network: Network) -> No demands_config = [ { "source": "[AB]", - "sink": "[CD]", - "demand": 50.0, + "target": "[CD]", + "volume": 50.0, "mode": "combine", }, ] @@ -351,7 +340,7 @@ def test_sensitivity_analysis_basic(self, simple_network: Network) -> None: excluded_nodes=set(), excluded_links=set(), source="A", - sink="C", + target="C", mode="combine", ) @@ -381,5 +370,5 @@ def test_sensitivity_analysis_empty_result(self, simple_network: Network) -> Non excluded_nodes=set(), excluded_links=set(), source="nonexistent.*", - sink="also_nonexistent.*", + target="also_nonexistent.*", ) diff --git a/tests/analysis/test_functions_details.py b/tests/analysis/test_functions_details.py index 101ed03..38f76c6 100644 --- a/tests/analysis/test_functions_details.py +++ b/tests/analysis/test_functions_details.py @@ -23,8 +23,8 @@ def test_demand_placement_analysis_includes_flow_details_costs_and_edges() -> No demands_config = [ { "source": "A", - "sink": "D", - "demand": 150.0, # Exceeds single path capacity, will use both paths + "target": "D", + "volume": 150.0, # Exceeds single path capacity, will use both paths "mode": "pairwise", "priority": 0, }, diff --git a/tests/analysis/test_maxflow_api.py b/tests/analysis/test_maxflow_api.py index a356d09..a9d6578 100644 --- a/tests/analysis/test_maxflow_api.py +++ b/tests/analysis/test_maxflow_api.py @@ -170,7 +170,7 @@ def test_network_dc_to_dc_reverse_edge_first_hop() -> None: Nodes: A/dc, A/leaf, B/leaf, B/dc. Links (forward): A/leaf->A/dc (10), A/leaf->B/leaf (10), B/leaf->B/dc (10) - The wrapper builds a StrictMultiDiGraph with add_reverse=True, creating + The graph builder creates a StrictMultiDiGraph with add_reverse=True, creating reverse DC->leaf edges, so A/dc can reach B/dc via DC->leaf->leaf->DC. 
Expect positive flow (10.0) in combine mode. diff --git a/tests/analysis/test_maxflow_cache.py b/tests/analysis/test_maxflow_cache.py index 3f1aa5b..af2f241 100644 --- a/tests/analysis/test_maxflow_cache.py +++ b/tests/analysis/test_maxflow_cache.py @@ -15,42 +15,7 @@ import pytest from ngraph import Link, Mode, Network, Node, analyze - - -def _diamond_network( - *, - disable_node_b: bool = False, - disable_link_a_b: bool = False, -) -> Network: - """Build a diamond network with optional disabled components. - - Topology: - A -> B (cap 5) -> D (cap 5) [path 1, cost 2] - A -> C (cap 3) -> D (cap 3) [path 2, cost 4] - - With both paths enabled: max flow = 8 (5 via B + 3 via C) - With B disabled: max flow = 3 (only via C) - With A->B link disabled: max flow = 3 (only via C) - - Args: - disable_node_b: If True, disable node B. - disable_link_a_b: If True, disable the A->B link. - - Returns: - Network with configured topology. - """ - net = Network() - net.add_node(Node("A")) - net.add_node(Node("B", disabled=disable_node_b)) - net.add_node(Node("C")) - net.add_node(Node("D")) - - net.add_link(Link("A", "B", capacity=5.0, cost=1.0, disabled=disable_link_a_b)) - net.add_link(Link("B", "D", capacity=5.0, cost=1.0)) - net.add_link(Link("A", "C", capacity=3.0, cost=2.0)) - net.add_link(Link("C", "D", capacity=3.0, cost=2.0)) - - return net +from tests.conftest import make_asymmetric_diamond def _linear_network(*, disable_middle: bool = False) -> Network: @@ -78,7 +43,7 @@ class TestDisabledNodes: def test_disabled_node_blocks_path_bound(self) -> None: """Disabled node should block flow through it in bound context.""" - net = _diamond_network(disable_node_b=True) + net = make_asymmetric_diamond(disable_node_b=True) # Bound context - source/sink pre-configured ctx = analyze(net, source="^A$", sink="^D$", mode=Mode.COMBINE) @@ -89,7 +54,7 @@ def test_disabled_node_blocks_path_bound(self) -> None: def test_disabled_node_blocks_path_unbound(self) -> None: """Disabled node should block flow through it in unbound context.""" - net = _diamond_network(disable_node_b=True) + net = make_asymmetric_diamond(disable_node_b=True) # Unbound context - source/sink per-call ctx = analyze(net) @@ -100,7 +65,7 @@ def test_disabled_node_blocks_path_unbound(self) -> None: def test_disabled_node_bound_vs_unbound_consistency(self) -> None: """Bound and unbound contexts should produce identical results.""" - net = _diamond_network(disable_node_b=True) + net = make_asymmetric_diamond(disable_node_b=True) # Unbound result_unbound = analyze(net).max_flow("^A$", "^D$", mode=Mode.COMBINE) @@ -122,7 +87,7 @@ def test_disabled_node_in_only_path_yields_zero_flow(self) -> None: def test_max_flow_detailed_disabled_node(self) -> None: """max_flow_detailed should respect disabled nodes.""" - net = _diamond_network(disable_node_b=True) + net = make_asymmetric_diamond(disable_node_b=True) ctx = analyze(net, source="^A$", sink="^D$", mode=Mode.COMBINE) result = ctx.max_flow_detailed() @@ -136,7 +101,7 @@ def test_max_flow_detailed_disabled_node(self) -> None: def test_sensitivity_disabled_node(self) -> None: """sensitivity analysis should respect disabled nodes.""" - net = _diamond_network(disable_node_b=True) + net = make_asymmetric_diamond(disable_node_b=True) ctx = analyze(net, source="^A$", sink="^D$", mode=Mode.COMBINE) result = ctx.sensitivity() @@ -155,7 +120,7 @@ class TestDisabledLinks: def test_disabled_link_blocks_path_bound(self) -> None: """Disabled link should block flow through it in bound context.""" - net = 
_diamond_network(disable_link_a_b=True) + net = make_asymmetric_diamond(disable_link_a_b=True) ctx = analyze(net, source="^A$", sink="^D$", mode=Mode.COMBINE) result = ctx.max_flow() @@ -165,7 +130,7 @@ def test_disabled_link_blocks_path_bound(self) -> None: def test_disabled_link_blocks_path_unbound(self) -> None: """Disabled link should block flow through it in unbound context.""" - net = _diamond_network(disable_link_a_b=True) + net = make_asymmetric_diamond(disable_link_a_b=True) result = analyze(net).max_flow("^A$", "^D$", mode=Mode.COMBINE) @@ -174,7 +139,7 @@ def test_disabled_link_blocks_path_unbound(self) -> None: def test_disabled_link_bound_vs_unbound_consistency(self) -> None: """Bound and unbound contexts should produce identical results.""" - net = _diamond_network(disable_link_a_b=True) + net = make_asymmetric_diamond(disable_link_a_b=True) result_unbound = analyze(net).max_flow("^A$", "^D$", mode=Mode.COMBINE) @@ -197,7 +162,7 @@ def test_disabled_link_in_only_path_yields_zero_flow(self) -> None: def test_max_flow_detailed_disabled_link(self) -> None: """max_flow_detailed should respect disabled links.""" - net = _diamond_network(disable_link_a_b=True) + net = make_asymmetric_diamond(disable_link_a_b=True) ctx = analyze(net, source="^A$", sink="^D$", mode=Mode.COMBINE) result = ctx.max_flow_detailed() @@ -211,7 +176,7 @@ class TestCombinedExclusions: def test_disabled_node_plus_explicit_node_exclusion(self) -> None: """Both disabled and explicitly excluded nodes should be masked.""" - net = _diamond_network(disable_node_b=True) # B disabled + net = make_asymmetric_diamond(disable_node_b=True) # B disabled ctx = analyze(net, source="^A$", sink="^D$", mode=Mode.COMBINE) @@ -222,7 +187,7 @@ def test_disabled_node_plus_explicit_node_exclusion(self) -> None: def test_disabled_link_plus_explicit_link_exclusion(self) -> None: """Both disabled and explicitly excluded links should be masked.""" - net = _diamond_network(disable_link_a_b=True) # A->B disabled + net = make_asymmetric_diamond(disable_link_a_b=True) # A->B disabled ctx = analyze(net, source="^A$", sink="^D$", mode=Mode.COMBINE) @@ -241,7 +206,7 @@ def test_disabled_link_plus_explicit_link_exclusion(self) -> None: def test_explicit_exclusion_without_disabled_topology(self) -> None: """Explicit exclusions should work even when no disabled topology.""" - net = _diamond_network() # Nothing disabled + net = make_asymmetric_diamond() # Nothing disabled ctx = analyze(net, source="^A$", sink="^D$", mode=Mode.COMBINE) @@ -257,7 +222,7 @@ class TestNoDisabledTopology: def test_no_disabled_topology_full_flow(self) -> None: """With no disabled components, full flow should be achieved.""" - net = _diamond_network() # Nothing disabled + net = make_asymmetric_diamond() # Nothing disabled ctx = analyze(net, source="^A$", sink="^D$", mode=Mode.COMBINE) result = ctx.max_flow() @@ -267,7 +232,7 @@ def test_no_disabled_topology_full_flow(self) -> None: def test_bound_vs_unbound_no_disabled(self) -> None: """Bound and unbound should match when nothing is disabled.""" - net = _diamond_network() + net = make_asymmetric_diamond() result_unbound = analyze(net).max_flow("^A$", "^D$", mode=Mode.COMBINE) @@ -314,7 +279,7 @@ class TestContextReuse: def test_multiple_exclusion_scenarios(self) -> None: """Same context should work with different exclusion sets.""" - net = _diamond_network() # Nothing disabled + net = make_asymmetric_diamond() # Nothing disabled ctx = analyze(net, source="^A$", sink="^D$", mode=Mode.COMBINE) @@ -336,7 +301,7 @@ def 
test_multiple_exclusion_scenarios(self) -> None: def test_bound_context_rejects_source_sink_override(self) -> None: """Bound context should reject source/sink arguments.""" - net = _diamond_network() + net = make_asymmetric_diamond() ctx = analyze(net, source="^A$", sink="^D$", mode=Mode.COMBINE) @@ -345,7 +310,7 @@ def test_bound_context_rejects_source_sink_override(self) -> None: def test_unbound_context_requires_source_sink(self) -> None: """Unbound context should require source/sink arguments.""" - net = _diamond_network() + net = make_asymmetric_diamond() ctx = analyze(net) diff --git a/tests/analysis/test_maxflow_cost_distribution.py b/tests/analysis/test_maxflow_cost_distribution.py index 5aee8cb..371ddad 100644 --- a/tests/analysis/test_maxflow_cost_distribution.py +++ b/tests/analysis/test_maxflow_cost_distribution.py @@ -9,27 +9,7 @@ import pytest from ngraph import FlowPlacement, Link, Mode, Network, Node, analyze - - -def _multi_tier_network() -> Network: - """Build a network with multiple cost tiers. - - Topology: - A -> B (cost 1, cap 5) -> D (cost 1, cap 5) [tier 1: cost 2, cap 5] - A -> C (cost 2, cap 3) -> D (cost 2, cap 3) [tier 2: cost 4, cap 3] - - Total max flow: 8 (5 from tier 1 + 3 from tier 2) - """ - net = Network() - for name in ["A", "B", "C", "D"]: - net.add_node(Node(name)) - - net.add_link(Link("A", "B", capacity=5.0, cost=1.0)) - net.add_link(Link("B", "D", capacity=5.0, cost=1.0)) - net.add_link(Link("A", "C", capacity=3.0, cost=2.0)) - net.add_link(Link("C", "D", capacity=3.0, cost=2.0)) - - return net +from tests.conftest import make_asymmetric_diamond def _parallel_equal_cost_network() -> Network: @@ -73,7 +53,7 @@ class TestCostDistributionBasic: def test_multi_tier_distribution(self) -> None: """Test that flow is distributed across cost tiers correctly.""" - net = _multi_tier_network() + net = make_asymmetric_diamond() result = analyze(net).max_flow_detailed("^A$", "^D$", mode=Mode.COMBINE) @@ -121,9 +101,9 @@ class TestShortestPathMode: def test_shortest_path_mode_uses_only_best_tier(self) -> None: """Test that shortest_path=True only uses lowest cost tier.""" - net = _multi_tier_network() + net = make_asymmetric_diamond() - # _multi_tier_network has nodes A, B, C, D + # make_asymmetric_diamond has nodes A, B, C, D result = analyze(net).max_flow_detailed( "^A$", "^D$", mode=Mode.COMBINE, shortest_path=True ) @@ -168,7 +148,7 @@ class TestFlowPlacement: def test_proportional_placement(self) -> None: """Test PROPORTIONAL flow placement.""" - net = _multi_tier_network() + net = make_asymmetric_diamond() result = analyze(net).max_flow_detailed( "^A$", "^D$", mode=Mode.COMBINE, flow_placement=FlowPlacement.PROPORTIONAL diff --git a/tests/analysis/test_paths.py b/tests/analysis/test_paths.py index aeef604..c9191d4 100644 --- a/tests/analysis/test_paths.py +++ b/tests/analysis/test_paths.py @@ -285,13 +285,13 @@ def test_shortest_paths_with_match_selector(self) -> None: { "path": ".*", "match": { - "conditions": [{"attr": "group", "operator": "==", "value": "src"}] + "conditions": [{"attr": "group", "op": "==", "value": "src"}] }, }, { "path": ".*", "match": { - "conditions": [{"attr": "group", "operator": "==", "value": "dst"}] + "conditions": [{"attr": "group", "op": "==", "value": "dst"}] }, }, mode=Mode.COMBINE, diff --git a/tests/analysis/test_placement.py b/tests/analysis/test_placement.py index cdcb63a..afb7956 100644 --- a/tests/analysis/test_placement.py +++ b/tests/analysis/test_placement.py @@ -20,6 +20,149 @@ from ngraph.model.network import Link, 
Network, Node from ngraph.results.flow import FlowIterationResult +# --------------------------------------------------------------------------- +# Reference implementation (non-cached) for cross-validation testing +# --------------------------------------------------------------------------- + + +def _run_demand_placement_without_cache( + network: Network, + demands_config: list[dict[str, Any]], + include_flow_details: bool = False, + include_used_edges: bool = False, +) -> FlowIterationResult: + """Run demand placement using only FlowPolicy (no caching). + + This provides a reference implementation for equivalence testing. + """ + import netgraph_core + + from ngraph.analysis import AnalysisContext + from ngraph.analysis.demand import expand_demands + from ngraph.model.demand.spec import TrafficDemand + from ngraph.model.flow.policy_config import ( + FlowPolicyPreset, + create_flow_policy, + ) + from ngraph.results.flow import FlowEntry, FlowSummary + + # Reconstruct TrafficDemand objects + traffic_demands = [] + for config in demands_config: + demand = TrafficDemand( + source=config["source"], + target=config["target"], + volume=config["volume"], + mode=config.get("mode", "pairwise"), + flow_policy=config.get("flow_policy"), + priority=config.get("priority", 0), + ) + traffic_demands.append(demand) + + # Expand demands + expansion = expand_demands( + network, + traffic_demands, + default_policy_preset=FlowPolicyPreset.SHORTEST_PATHS_ECMP, + ) + + # Build context + ctx = AnalysisContext.from_network(network, augmentations=expansion.augmentations) + + handle = ctx.handle + multidigraph = ctx.multidigraph + node_mapper = ctx.node_mapper + edge_mapper = ctx.edge_mapper + algorithms = ctx.algorithms + node_mask = ctx._build_node_mask(set()) + edge_mask = ctx._build_edge_mask(set()) + + flow_graph = netgraph_core.FlowGraph(multidigraph) + + # Place demands using ONLY FlowPolicy (no caching) + flow_entries: list[FlowEntry] = [] + total_demand = 0.0 + total_placed = 0.0 + + for demand in expansion.demands: + src_id = node_mapper.to_id(demand.src_name) + dst_id = node_mapper.to_id(demand.dst_name) + + policy = create_flow_policy( + algorithms, + handle, + demand.policy_preset, + node_mask=node_mask, + edge_mask=edge_mask, + ) + + placed, flow_count = policy.place_demand( + flow_graph, + src_id, + dst_id, + demand.priority, + demand.volume, + ) + + cost_distribution: dict[float, float] = {} + used_edges: set[str] = set() + + if include_flow_details or include_used_edges: + flows_dict = policy.flows + for flow_key, flow_data in flows_dict.items(): + if include_flow_details: + cost = float(flow_data[2]) + flow_vol = float(flow_data[3]) + if flow_vol > 0: + cost_distribution[cost] = ( + cost_distribution.get(cost, 0.0) + flow_vol + ) + + if include_used_edges: + flow_idx = netgraph_core.FlowIndex( + flow_key[0], flow_key[1], flow_key[2], flow_key[3] + ) + edges = flow_graph.get_flow_edges(flow_idx) + for edge_id, _ in edges: + edge_ref = edge_mapper.to_ref(edge_id, multidigraph) + if edge_ref is not None: + used_edges.add(f"{edge_ref.link_id}:{edge_ref.direction}") + + entry_data: dict[str, Any] = {} + if include_used_edges and used_edges: + entry_data["edges"] = sorted(used_edges) + entry_data["edges_kind"] = "used" + + entry = FlowEntry( + source=demand.src_name, + destination=demand.dst_name, + priority=demand.priority, + demand=demand.volume, + placed=placed, + dropped=demand.volume - placed, + cost_distribution=cost_distribution if include_flow_details else {}, + data=entry_data, + ) + 
flow_entries.append(entry) + total_demand += demand.volume + total_placed += placed + + overall_ratio = (total_placed / total_demand) if total_demand > 0 else 1.0 + dropped_flows = sum(1 for e in flow_entries if e.dropped > 0.0) + summary = FlowSummary( + total_demand=total_demand, + total_placed=total_placed, + overall_ratio=overall_ratio, + dropped_flows=dropped_flows, + num_flows=len(flow_entries), + ) + + return FlowIterationResult( + flows=flow_entries, + summary=summary, + data={}, + ) + class TestHelperFunctions: """Test helper functions for SPF caching.""" @@ -82,20 +225,7 @@ def test_lsp_policies_not_cacheable(self) -> None: class TestSPFCachingBasic: """Test basic SPF caching behavior.""" - @pytest.fixture - def diamond_network(self) -> Network: - """Create a diamond network: A -> B,C -> D.""" - network = Network() - for node in ["A", "B", "C", "D"]: - network.add_node(Node(node)) - - # Two equal-cost paths of capacity 60 each - network.add_link(Link("A", "B", capacity=60.0, cost=1.0)) - network.add_link(Link("A", "C", capacity=60.0, cost=1.0)) - network.add_link(Link("B", "D", capacity=60.0, cost=1.0)) - network.add_link(Link("C", "D", capacity=60.0, cost=1.0)) - - return network + # Uses diamond_network fixture from conftest.py @pytest.fixture def multi_source_network(self) -> Network: @@ -128,8 +258,8 @@ def test_single_demand_ecmp(self, diamond_network: Network) -> None: demands_config = [ { "source": "A", - "sink": "D", - "demand": 50.0, + "target": "D", + "volume": 50.0, "mode": "pairwise", "priority": 0, }, @@ -158,15 +288,15 @@ def test_multiple_demands_same_source_reuses_cache( demands_config = [ { "source": "S1", - "sink": "D1", - "demand": 30.0, + "target": "D1", + "volume": 30.0, "mode": "pairwise", "priority": 0, }, { "source": "S1", - "sink": "D2", - "demand": 30.0, + "target": "D2", + "volume": 30.0, "mode": "pairwise", "priority": 0, }, @@ -189,20 +319,20 @@ def test_demands_from_multiple_sources(self, multi_source_network: Network) -> N demands_config = [ { "source": "S1", - "sink": "D1", - "demand": 50.0, + "target": "D1", + "volume": 50.0, "mode": "pairwise", }, { "source": "S2", - "sink": "D1", - "demand": 50.0, + "target": "D1", + "volume": 50.0, "mode": "pairwise", }, { "source": "S3", - "sink": "D2", - "demand": 50.0, + "target": "D2", + "volume": 50.0, "mode": "pairwise", }, ] @@ -245,156 +375,13 @@ def mesh_network(self) -> Network: return network - def _run_demand_placement_without_cache( - self, - network: Network, - demands_config: list[dict[str, Any]], - include_flow_details: bool = False, - include_used_edges: bool = False, - ) -> FlowIterationResult: - """Run demand placement using only FlowPolicy (no caching). - - This provides a reference implementation for equivalence testing. 
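For readers following the refactor: the method removed below was hoisted verbatim to module scope (the `+` block earlier in this file), and every equivalence test consumes it the same way. A sketch of that idiom, assuming the names defined in this patch (`demand_placement_analysis`, `_run_demand_placement_without_cache`):

```python
# Sketch: the cross-validation idiom used by the equivalence tests in this file.
import pytest

def assert_cached_matches_reference(network, demands_config) -> None:
    # Production path (SPF caching enabled).
    cached = demand_placement_analysis(
        network=network,
        excluded_nodes=set(),
        excluded_links=set(),
        demands_config=demands_config,
    )
    # Reference path (FlowPolicy only, no caching).
    reference = _run_demand_placement_without_cache(
        network=network,
        demands_config=demands_config,
    )
    assert cached.summary.total_placed == pytest.approx(
        reference.summary.total_placed, rel=1e-9
    )
```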
- """ - import netgraph_core - - from ngraph.analysis import AnalysisContext - from ngraph.analysis.demand import expand_demands - from ngraph.model.demand.spec import TrafficDemand - from ngraph.model.flow.policy_config import ( - FlowPolicyPreset, - create_flow_policy, - ) - from ngraph.results.flow import FlowEntry, FlowSummary - - # Reconstruct TrafficDemand objects - traffic_demands = [] - for config in demands_config: - demand = TrafficDemand( - source=config["source"], - sink=config["sink"], - demand=config["demand"], - mode=config.get("mode", "pairwise"), - flow_policy_config=config.get("flow_policy_config"), - priority=config.get("priority", 0), - ) - traffic_demands.append(demand) - - # Expand demands - expansion = expand_demands( - network, - traffic_demands, - default_policy_preset=FlowPolicyPreset.SHORTEST_PATHS_ECMP, - ) - - # Build context - ctx = AnalysisContext.from_network( - network, augmentations=expansion.augmentations - ) - - handle = ctx.handle - multidigraph = ctx.multidigraph - node_mapper = ctx.node_mapper - edge_mapper = ctx.edge_mapper - algorithms = ctx.algorithms - node_mask = ctx._build_node_mask(set()) - edge_mask = ctx._build_edge_mask(set()) - - flow_graph = netgraph_core.FlowGraph(multidigraph) - - # Place demands using ONLY FlowPolicy (no caching) - flow_entries: list[FlowEntry] = [] - total_demand = 0.0 - total_placed = 0.0 - - for demand in expansion.demands: - src_id = node_mapper.to_id(demand.src_name) - dst_id = node_mapper.to_id(demand.dst_name) - - policy = create_flow_policy( - algorithms, - handle, - demand.policy_preset, - node_mask=node_mask, - edge_mask=edge_mask, - ) - - placed, flow_count = policy.place_demand( - flow_graph, - src_id, - dst_id, - demand.priority, - demand.volume, - ) - - cost_distribution: dict[float, float] = {} - used_edges: set[str] = set() - - if include_flow_details or include_used_edges: - flows_dict = policy.flows - for flow_key, flow_data in flows_dict.items(): - if include_flow_details: - cost = float(flow_data[2]) - flow_vol = float(flow_data[3]) - if flow_vol > 0: - cost_distribution[cost] = ( - cost_distribution.get(cost, 0.0) + flow_vol - ) - - if include_used_edges: - flow_idx = netgraph_core.FlowIndex( - flow_key[0], flow_key[1], flow_key[2], flow_key[3] - ) - edges = flow_graph.get_flow_edges(flow_idx) - for edge_id, _ in edges: - edge_ref = edge_mapper.to_ref(edge_id, multidigraph) - if edge_ref is not None: - used_edges.add( - f"{edge_ref.link_id}:{edge_ref.direction}" - ) - - entry_data: dict[str, Any] = {} - if include_used_edges and used_edges: - entry_data["edges"] = sorted(used_edges) - entry_data["edges_kind"] = "used" - - entry = FlowEntry( - source=demand.src_name, - destination=demand.dst_name, - priority=demand.priority, - demand=demand.volume, - placed=placed, - dropped=demand.volume - placed, - cost_distribution=cost_distribution if include_flow_details else {}, - data=entry_data, - ) - flow_entries.append(entry) - total_demand += demand.volume - total_placed += placed - - overall_ratio = (total_placed / total_demand) if total_demand > 0 else 1.0 - dropped_flows = sum(1 for e in flow_entries if e.dropped > 0.0) - summary = FlowSummary( - total_demand=total_demand, - total_placed=total_placed, - overall_ratio=overall_ratio, - dropped_flows=dropped_flows, - num_flows=len(flow_entries), - ) - - return FlowIterationResult( - flows=flow_entries, - summary=summary, - data={}, - ) - def test_equivalence_ecmp_single_demand(self, mesh_network: Network) -> None: """Test that ECMP placement is equivalent 
with and without caching.""" demands_config = [ { "source": "A", - "sink": "D", - "demand": 80.0, + "target": "D", + "volume": 80.0, "mode": "pairwise", }, ] @@ -408,7 +395,7 @@ def test_equivalence_ecmp_single_demand(self, mesh_network: Network) -> None: ) # Run without caching (reference) - reference_result = self._run_demand_placement_without_cache( + reference_result = _run_demand_placement_without_cache( network=mesh_network, demands_config=demands_config, ) @@ -428,10 +415,10 @@ def test_equivalence_ecmp_single_demand(self, mesh_network: Network) -> None: def test_equivalence_ecmp_multiple_demands(self, mesh_network: Network) -> None: """Test ECMP placement equivalence with multiple demands.""" demands_config = [ - {"source": "A", "sink": "B", "demand": 30.0, "mode": "pairwise"}, - {"source": "A", "sink": "D", "demand": 40.0, "mode": "pairwise"}, - {"source": "C", "sink": "B", "demand": 25.0, "mode": "pairwise"}, - {"source": "C", "sink": "D", "demand": 35.0, "mode": "pairwise"}, + {"source": "A", "target": "B", "volume": 30.0, "mode": "pairwise"}, + {"source": "A", "target": "D", "volume": 40.0, "mode": "pairwise"}, + {"source": "C", "target": "B", "volume": 25.0, "mode": "pairwise"}, + {"source": "C", "target": "D", "volume": 35.0, "mode": "pairwise"}, ] cached_result = demand_placement_analysis( @@ -441,7 +428,7 @@ def test_equivalence_ecmp_multiple_demands(self, mesh_network: Network) -> None: demands_config=demands_config, ) - reference_result = self._run_demand_placement_without_cache( + reference_result = _run_demand_placement_without_cache( network=mesh_network, demands_config=demands_config, ) @@ -466,7 +453,7 @@ def test_equivalence_ecmp_multiple_demands(self, mesh_network: Network) -> None: def test_equivalence_with_flow_details(self, mesh_network: Network) -> None: """Test equivalence when include_flow_details is True.""" demands_config = [ - {"source": "A", "sink": "D", "demand": 50.0, "mode": "pairwise"}, + {"source": "A", "target": "D", "volume": 50.0, "mode": "pairwise"}, ] cached_result = demand_placement_analysis( @@ -477,7 +464,7 @@ def test_equivalence_with_flow_details(self, mesh_network: Network) -> None: include_flow_details=True, ) - reference_result = self._run_demand_placement_without_cache( + reference_result = _run_demand_placement_without_cache( network=mesh_network, demands_config=demands_config, include_flow_details=True, @@ -498,7 +485,7 @@ def test_equivalence_with_flow_details(self, mesh_network: Network) -> None: def test_equivalence_with_used_edges(self, mesh_network: Network) -> None: """Test equivalence when include_used_edges is True.""" demands_config = [ - {"source": "A", "sink": "D", "demand": 50.0, "mode": "pairwise"}, + {"source": "A", "target": "D", "volume": 50.0, "mode": "pairwise"}, ] cached_result = demand_placement_analysis( @@ -509,7 +496,7 @@ def test_equivalence_with_used_edges(self, mesh_network: Network) -> None: include_used_edges=True, ) - reference_result = self._run_demand_placement_without_cache( + reference_result = _run_demand_placement_without_cache( network=mesh_network, demands_config=demands_config, include_used_edges=True, @@ -526,11 +513,11 @@ def test_equivalence_with_used_edges(self, mesh_network: Network) -> None: class TestSPFCachingTEPolicy: - """Test SPF caching with TE_WCMP_UNLIM policy including fallback behavior.""" + """Test SPF caching with TE_WCMP_UNLIM policy including rerouting behavior.""" @pytest.fixture def constrained_network(self) -> Network: - """Create a network with limited capacity to test 
fallback. + """Create a network with limited capacity to test rerouting. Topology: A --> B --> D @@ -552,14 +539,14 @@ def constrained_network(self) -> Network: return network def test_te_wcmp_basic_placement(self, constrained_network: Network) -> None: - """Test TE_WCMP_UNLIM basic placement without fallback.""" + """Test TE_WCMP_UNLIM basic placement without rerouting.""" demands_config = [ { "source": "A", - "sink": "D", - "demand": 40.0, + "target": "D", + "volume": 40.0, "mode": "pairwise", - "flow_policy_config": FlowPolicyPreset.TE_WCMP_UNLIM, + "flow_policy": FlowPolicyPreset.TE_WCMP_UNLIM, }, ] @@ -576,15 +563,15 @@ def test_te_wcmp_basic_placement(self, constrained_network: Network) -> None: assert flow.placed == 40.0 assert flow.dropped == 0.0 - def test_te_wcmp_fallback_on_saturation(self, constrained_network: Network) -> None: - """Test TE_WCMP_UNLIM fallback when primary path saturates.""" + def test_te_wcmp_reroute_on_saturation(self, constrained_network: Network) -> None: + """Test TE_WCMP_UNLIM rerouting when primary path saturates.""" demands_config = [ { "source": "A", - "sink": "D", - "demand": 80.0, # Exceeds primary path capacity + "target": "D", + "volume": 80.0, # Exceeds primary path capacity "mode": "pairwise", - "flow_policy_config": FlowPolicyPreset.TE_WCMP_UNLIM, + "flow_policy": FlowPolicyPreset.TE_WCMP_UNLIM, }, ] @@ -615,18 +602,18 @@ def test_te_wcmp_multiple_demands_same_source( demands_config = [ { "source": "A", - "sink": "D", - "demand": 30.0, + "target": "D", + "volume": 30.0, "mode": "pairwise", - "flow_policy_config": FlowPolicyPreset.TE_WCMP_UNLIM, + "flow_policy": FlowPolicyPreset.TE_WCMP_UNLIM, }, { "source": "A", - "sink": "D", - "demand": 30.0, + "target": "D", + "volume": 30.0, "mode": "pairwise", "priority": 1, # Different priority = different demand - "flow_policy_config": FlowPolicyPreset.TE_WCMP_UNLIM, + "flow_policy": FlowPolicyPreset.TE_WCMP_UNLIM, }, ] @@ -670,8 +657,8 @@ def test_unreachable_destination(self, disconnected_network: Network) -> None: demands_config = [ { "source": "A", - "sink": "D", # Unreachable from A - "demand": 50.0, + "target": "D", # Unreachable from A + "volume": 50.0, "mode": "pairwise", }, ] @@ -698,8 +685,8 @@ def test_zero_demand(self) -> None: demands_config = [ { "source": "A", - "sink": "B", - "demand": 0.0, + "target": "B", + "volume": 0.0, "mode": "pairwise", }, ] @@ -726,8 +713,8 @@ def test_partial_placement_due_to_capacity(self) -> None: demands_config = [ { "source": "A", - "sink": "B", - "demand": 50.0, + "target": "B", + "volume": 50.0, "mode": "pairwise", }, ] @@ -755,8 +742,8 @@ def test_empty_cost_distribution_when_not_requested(self) -> None: demands_config = [ { "source": "A", - "sink": "B", - "demand": 50.0, + "target": "B", + "volume": 50.0, "mode": "pairwise", }, ] @@ -782,8 +769,8 @@ def test_empty_edges_when_not_requested(self) -> None: demands_config = [ { "source": "A", - "sink": "B", - "demand": 50.0, + "target": "B", + "volume": 50.0, "mode": "pairwise", }, ] @@ -821,17 +808,24 @@ def test_placement_with_excluded_link(self, triangle_network: Network) -> None: demands_config = [ { "source": "A", - "sink": "C", - "demand": 50.0, + "target": "C", + "volume": 50.0, "mode": "pairwise", }, ] + # Find the actual link ID for the A->C link + link_ac = next( + link + for link in triangle_network.links.values() + if link.source == "A" and link.target == "C" + ) + # Exclude direct A-C link, forcing traffic through B result = demand_placement_analysis( network=triangle_network, 
excluded_nodes=set(), - excluded_links={"link_A_C"}, # Link ID format + excluded_links={link_ac.id}, demands_config=demands_config, include_flow_details=True, ) @@ -849,8 +843,8 @@ def test_placement_with_excluded_node(self, triangle_network: Network) -> None: demands_config = [ { "source": "A", - "sink": "C", - "demand": 50.0, + "target": "C", + "volume": 50.0, "mode": "pairwise", }, ] @@ -875,34 +869,15 @@ def test_placement_with_excluded_node(self, triangle_network: Network) -> None: class TestSPFCachingCostDistribution: """Test cost distribution correctness with SPF caching.""" - @pytest.fixture - def multi_tier_network(self) -> Network: - """Create a network with multiple cost tiers. - - A --[cost=1]--> B --[cost=1]--> D (cost 2, capacity 30) - A --[cost=2]--> C --[cost=2]--> D (cost 4, capacity 30) - """ - network = Network() - for node in ["A", "B", "C", "D"]: - network.add_node(Node(node)) - - # Tier 1: cost 2, capacity 30 - network.add_link(Link("A", "B", capacity=30.0, cost=1.0)) - network.add_link(Link("B", "D", capacity=30.0, cost=1.0)) - - # Tier 2: cost 4, capacity 30 - network.add_link(Link("A", "C", capacity=30.0, cost=2.0)) - network.add_link(Link("C", "D", capacity=30.0, cost=2.0)) - - return network + # Uses multi_tier_network fixture from conftest.py def test_cost_distribution_single_tier(self, multi_tier_network: Network) -> None: """Test cost distribution when only one tier is used.""" demands_config = [ { "source": "A", - "sink": "D", - "demand": 25.0, # Fits in tier 1 + "target": "D", + "volume": 25.0, # Fits in tier 1 "mode": "pairwise", }, ] @@ -927,10 +902,10 @@ def test_cost_distribution_multiple_tiers_te_policy( demands_config = [ { "source": "A", - "sink": "D", - "demand": 50.0, # Exceeds tier 1 capacity + "target": "D", + "volume": 50.0, # Exceeds tier 1 capacity "mode": "pairwise", - "flow_policy_config": FlowPolicyPreset.TE_WCMP_UNLIM, + "flow_policy": FlowPolicyPreset.TE_WCMP_UNLIM, }, ] @@ -952,3 +927,292 @@ def test_cost_distribution_multiple_tiers_te_policy( assert total == pytest.approx(50.0, rel=1e-6) # Should have cost 2 (tier 1) and cost 4 (tier 2) assert len(flow.cost_distribution) >= 1 + + +# --------------------------------------------------------------------------- +# Combinatorial cross-validation tests +# --------------------------------------------------------------------------- + + +class TestCachedVsNonCachedEquivalence: + """Cross-validation: cached placement must equal FlowPolicy-based placement. + + Systematically tests combinations of: + - Policy presets (ECMP, WCMP, TE_WCMP_UNLIM) + - Demand patterns (single/multi source, single/multi destination) + - Capacity constraints (unconstrained, constrained) + """ + + @pytest.fixture + def multi_dest_constrained_network(self) -> Network: + """Network for testing with multiple destinations and constrained capacity. 
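All demands_config entries built by `_build_demands_config` below use the renamed schema (`target` was `sink`, `volume` was `demand`, `flow_policy` was `flow_policy_config`). One entry, spelled out for reference:

```python
# Sketch: a single demands_config entry under the renamed schema.
from ngraph.model.flow.policy_config import FlowPolicyPreset

entry = {
    "source": "A",
    "target": "D",      # formerly "sink"
    "volume": 60.0,     # formerly "demand"
    "mode": "pairwise",
    "priority": 0,      # optional; the reference implementation defaults this to 0
    "flow_policy": FlowPolicyPreset.TE_WCMP_UNLIM,  # formerly "flow_policy_config"
}
```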
+ + Topology: + A --> B --> D (cap 50) + | + +--> C --> D (cap 50) + | + +--> E (cap 50) + + This supports: + - Single source (A) to multiple destinations (D, E) + - Constrained capacity forcing TE rerouting + """ + network = Network() + for node in ["A", "B", "C", "D", "E"]: + network.add_node(Node(node)) + + # Primary path to D: A -> B -> D (cost 2, cap 50) + network.add_link(Link("A", "B", capacity=50.0, cost=1.0)) + network.add_link(Link("B", "D", capacity=50.0, cost=1.0)) + + # Secondary path to D: A -> C -> D (cost 4, cap 50) + network.add_link(Link("A", "C", capacity=50.0, cost=2.0)) + network.add_link(Link("C", "D", capacity=50.0, cost=2.0)) + + # Path to E: A -> C -> E (cost 4, cap 50) + network.add_link(Link("C", "E", capacity=50.0, cost=2.0)) + + return network + + @pytest.fixture + def overlapping_paths_network(self) -> Network: + """Network where paths to different destinations share edges. + + Topology: + A --> B --> D (cap 50, cost 1 each) + | + +--> E (cap 50, cost 1) + + A --> C --> D (cap 50, cost 2 each) + + Key properties: + - Paths to D: A->B->D (cost 2, cap 50) or A->C->D (cost 4, cap 50) + - Path to E: A->B->E (cost 2, cap 50) - shares A->B with primary path to D + + This topology exposes cache corruption because: + 1. Demand A->D (60 vol) saturates A->B via primary path, reroutes to A->C->D + 2. Residual-based DAG excludes A->B (0 capacity) + 3. If cached, demand A->E cannot find optimal path A->B->E + """ + network = Network() + for node in ["A", "B", "C", "D", "E"]: + network.add_node(Node(node)) + + # Primary path to D: A -> B -> D (cost 2, cap 50) + network.add_link(Link("A", "B", capacity=50.0, cost=1.0)) + network.add_link(Link("B", "D", capacity=50.0, cost=1.0)) + + # Path to E shares A->B: A -> B -> E (cost 2, cap 50) + network.add_link(Link("B", "E", capacity=50.0, cost=1.0)) + + # Secondary path to D: A -> C -> D (cost 4, cap 50) + network.add_link(Link("A", "C", capacity=50.0, cost=2.0)) + network.add_link(Link("C", "D", capacity=50.0, cost=2.0)) + + return network + + @pytest.fixture + def multi_source_multi_dest_network(self) -> Network: + """Network with multiple sources and multiple destinations. + + Topology: + A --> R --> D + | | + B ----+---> E + + Where R is a central router. 
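The fixture capacities admit a quick sanity check via the placement API under test. A sketch of an assertion one could add in a test that requests the fixture; the 100.0 figure is arithmetic from the docstring above (two disjoint 50-unit A->D paths), not a value this patch asserts anywhere:

```python
# Sketch: sanity-checking multi_dest_constrained_network capacities.
import pytest

def test_fixture_total_capacity(multi_dest_constrained_network) -> None:
    result = demand_placement_analysis(
        network=multi_dest_constrained_network,
        excluded_nodes=set(),
        excluded_links=set(),
        demands_config=[{
            "source": "A",
            "target": "D",
            "volume": 200.0,  # oversized on purpose
            "mode": "pairwise",
            "flow_policy": FlowPolicyPreset.TE_WCMP_UNLIM,
        }],
    )
    # TE rerouting should fill both tiers: 50 via B + 50 via C.
    assert result.summary.total_placed == pytest.approx(100.0)
```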
+ """ + network = Network() + for node in ["A", "B", "R", "D", "E"]: + network.add_node(Node(node)) + + # Sources to router + network.add_link(Link("A", "R", capacity=100.0, cost=1.0)) + network.add_link(Link("B", "R", capacity=100.0, cost=1.0)) + + # Router to destinations + network.add_link(Link("R", "D", capacity=100.0, cost=1.0)) + network.add_link(Link("R", "E", capacity=100.0, cost=1.0)) + + return network + + def _build_demands_config( + self, + sources: list[str], + dests: list[str], + volume: float, + preset: FlowPolicyPreset, + ) -> list[dict[str, Any]]: + """Build demands config from source/dest lists.""" + demands = [] + for src in sources: + for dst in dests: + demands.append( + { + "source": src, + "target": dst, + "volume": volume, + "mode": "pairwise", + "flow_policy": preset, + } + ) + return demands + + @pytest.mark.parametrize( + "preset,sources,dests,constrained", + [ + # ECMP tests (no TE loop) + (FlowPolicyPreset.SHORTEST_PATHS_ECMP, ["A"], ["D"], False), + (FlowPolicyPreset.SHORTEST_PATHS_ECMP, ["A"], ["D", "E"], False), + (FlowPolicyPreset.SHORTEST_PATHS_ECMP, ["A", "B"], ["D"], False), + # WCMP tests + (FlowPolicyPreset.SHORTEST_PATHS_WCMP, ["A"], ["D"], False), + (FlowPolicyPreset.SHORTEST_PATHS_WCMP, ["A"], ["D", "E"], False), + # TE_WCMP_UNLIM tests + (FlowPolicyPreset.TE_WCMP_UNLIM, ["A"], ["D"], False), + (FlowPolicyPreset.TE_WCMP_UNLIM, ["A"], ["D"], True), + (FlowPolicyPreset.TE_WCMP_UNLIM, ["A", "B"], ["D"], True), + (FlowPolicyPreset.TE_WCMP_UNLIM, ["A"], ["D", "E"], True), + ], + ids=[ + "ecmp_single_src_single_dest", + "ecmp_single_src_multi_dest", + "ecmp_multi_src_single_dest", + "wcmp_single_src_single_dest", + "wcmp_single_src_multi_dest", + "te_single_src_single_dest_unconstrained", + "te_single_src_single_dest_constrained", + "te_multi_src_single_dest_constrained", + "te_single_src_multi_dest_constrained", + ], + ) + def test_cached_equals_noncached( + self, + preset: FlowPolicyPreset, + sources: list[str], + dests: list[str], + constrained: bool, + multi_dest_constrained_network: Network, + multi_source_multi_dest_network: Network, + ) -> None: + """Cached placement must produce identical results to FlowPolicy placement.""" + # Select network based on source count + if len(sources) > 1: + network = multi_source_multi_dest_network + else: + network = multi_dest_constrained_network + + # Volume: use higher volume for constrained tests to trigger rerouting + volume = 60.0 if constrained else 30.0 + + demands_config = self._build_demands_config(sources, dests, volume, preset) + + # Run with caching (production implementation) + cached_result = demand_placement_analysis( + network=network, + excluded_nodes=set(), + excluded_links=set(), + demands_config=demands_config, + ) + + # Run without caching (reference implementation) + reference_result = _run_demand_placement_without_cache( + network=network, + demands_config=demands_config, + ) + + # Compare summaries + assert cached_result.summary.total_demand == pytest.approx( + reference_result.summary.total_demand, rel=1e-9 + ), "Total demand mismatch" + + assert cached_result.summary.total_placed == pytest.approx( + reference_result.summary.total_placed, rel=1e-9 + ), ( + f"Total placed mismatch: {cached_result.summary.total_placed} vs {reference_result.summary.total_placed}" + ) + + assert cached_result.summary.overall_ratio == pytest.approx( + reference_result.summary.overall_ratio, rel=1e-9 + ), "Overall ratio mismatch" + + # Compare individual flows + assert len(cached_result.flows) == 
len(reference_result.flows), ( + "Flow count mismatch" + ) + + for i, (cached_flow, ref_flow) in enumerate( + zip(cached_result.flows, reference_result.flows, strict=True) + ): + assert cached_flow.source == ref_flow.source, f"Flow {i}: source mismatch" + assert cached_flow.destination == ref_flow.destination, ( + f"Flow {i}: dest mismatch" + ) + assert cached_flow.demand == pytest.approx(ref_flow.demand, rel=1e-9), ( + f"Flow {i}: demand mismatch" + ) + assert cached_flow.placed == pytest.approx(ref_flow.placed, rel=1e-9), ( + f"Flow {i}: placed mismatch - cached={cached_flow.placed}, ref={ref_flow.placed}" + ) + + def test_te_overlapping_paths(self, overlapping_paths_network: Network) -> None: + """Test TE policy with overlapping paths to different destinations. + + This test specifically validates that cached placement handles the case where: + 1. First demand A->D saturates shared edge A->B, triggers TE rerouting + 2. Second demand A->E needs the same shared edge A->B for optimal path + + If residual-based DAGs were incorrectly cached, the second demand would + fail to find or use the optimal path through A->B. + """ + # First demand saturates A->B (cap 50), reroutes 10 to A->C->D + # Second demand needs A->B->E but A->B shows 0 residual in corrupted cache + demands_config = [ + { + "source": "A", + "target": "D", + "volume": 60.0, # Exceeds A->B->D capacity, triggers rerouting + "mode": "pairwise", + "flow_policy": FlowPolicyPreset.TE_WCMP_UNLIM, + }, + { + "source": "A", + "target": "E", + "volume": 30.0, # Should use A->B->E (optimal path) + "mode": "pairwise", + "flow_policy": FlowPolicyPreset.TE_WCMP_UNLIM, + }, + ] + + # Run with caching (production implementation) + cached_result = demand_placement_analysis( + network=overlapping_paths_network, + excluded_nodes=set(), + excluded_links=set(), + demands_config=demands_config, + ) + + # Run without caching (reference implementation) + reference_result = _run_demand_placement_without_cache( + network=overlapping_paths_network, + demands_config=demands_config, + ) + + # Compare total placed - should be identical + assert cached_result.summary.total_placed == pytest.approx( + reference_result.summary.total_placed, rel=1e-9 + ), ( + f"Total placed mismatch with overlapping paths: " + f"cached={cached_result.summary.total_placed}, " + f"ref={reference_result.summary.total_placed}" + ) + + # Compare individual flow placements + for i, (cached_flow, ref_flow) in enumerate( + zip(cached_result.flows, reference_result.flows, strict=True) + ): + assert cached_flow.placed == pytest.approx(ref_flow.placed, rel=1e-9), ( + f"Flow {i} ({cached_flow.source}->{cached_flow.destination}): " + f"placed mismatch - cached={cached_flow.placed}, ref={ref_flow.placed}" + ) diff --git a/tests/cli/test_cli.py b/tests/cli/test_cli.py index 6c2a40d..9043cab 100644 --- a/tests/cli/test_cli.py +++ b/tests/cli/test_cli.py @@ -160,10 +160,9 @@ def test_run_profile_flag_writes_results(tmp_path: Path, monkeypatch) -> None: links: - source: A target: B - link_params: - capacity: 1 + capacity: 1 workflow: - - step_type: NetworkStats + - type: NetworkStats name: stats """ ) @@ -190,7 +189,7 @@ def test_logging_levels_default_verbose_quiet( nodes: A: {} workflow: - - step_type: BuildGraph + - type: BuildGraph """ ) monkeypatch.chdir(tmp_path) @@ -229,10 +228,9 @@ def test_inspect_happy_path_prints_sections(tmp_path: Path) -> None: links: - source: A target: B - link_params: - capacity: 100 + capacity: 100 workflow: - - step_type: BuildGraph + - type: BuildGraph name: 
build """ ) @@ -259,14 +257,12 @@ def test_inspect_detail_mode_includes_tables(tmp_path: Path) -> None: links: - source: A target: B - link_params: - capacity: 10 + capacity: 10 - source: B target: C - link_params: - capacity: 20 + capacity: 20 workflow: - - step_type: BuildGraph + - type: BuildGraph """ ) @@ -290,11 +286,10 @@ def test_inspect_detail_mode_cost_shows_decimals(tmp_path: Path) -> None: links: - source: A target: B - link_params: - capacity: 10 - cost: 0.1 + capacity: 10 + cost: 0.1 workflow: - - step_type: BuildGraph + - type: BuildGraph """ ) @@ -317,10 +312,10 @@ def test_inspect_workflow_node_selection_preview_basic(tmp_path: Path) -> None: src-2: {} dst-1: {} workflow: - - step_type: MaxFlow + - type: MaxFlow name: cap source: "^src" - sink: "^dst" + target: "^dst" """ ) @@ -329,7 +324,7 @@ def test_inspect_workflow_node_selection_preview_basic(tmp_path: Path) -> None: out = "\n".join(str(c.args[0]) for c in mprint.call_args_list) assert "Node selection preview:" in out - assert "source:" in out and "sink:" in out + assert "source:" in out and "target:" in out assert "groups" in out and "nodes" in out @@ -342,10 +337,10 @@ def test_inspect_workflow_node_selection_detail_and_warning(tmp_path: Path) -> N nodes: A: {} workflow: - - step_type: MaxFlow + - type: MaxFlow name: cap2 source: "^none" - sink: "^none" + target: "^none" """ ) @@ -370,15 +365,14 @@ def test_inspect_capacity_vs_demand_summary_basic(tmp_path: Path) -> None: links: - source: A target: B - link_params: - capacity: 100 -traffic_matrix_set: + capacity: 100 +demands: default: - source: "^A$" - sink: "^B$" - demand: 50 + target: "^B$" + volume: 50 workflow: - - step_type: BuildGraph + - type: BuildGraph """ ) @@ -388,7 +382,7 @@ def test_inspect_capacity_vs_demand_summary_basic(tmp_path: Path) -> None: out = "\n".join(str(c.args[0]) for c in mprint.call_args_list) assert "Capacity vs Demand:" in out assert "enabled link capacity: 100.0" in out - assert "total demand (all matrices): 50.0" in out + assert "total demand (all sets): 50.0" in out assert "capacity/demand: 2.00x" in out assert "demand/capacity: 50.00%" in out @@ -406,10 +400,9 @@ def test_run_profile_uses_output_dir_profiles(tmp_path: Path, monkeypatch) -> No links: - source: A target: B - link_params: - capacity: 1 + capacity: 1 workflow: - - step_type: NetworkStats + - type: NetworkStats name: stats """ ) diff --git a/tests/conftest.py b/tests/conftest.py index 033c790..8fd3ab5 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,3 +1,105 @@ -"""Global pytest configuration.""" +"""Global pytest configuration and shared fixtures.""" from __future__ import annotations + +import pytest + +from ngraph import Link, Network, Node + +# ----------------------------------------------------------------------------- +# Shared Network Fixtures +# ----------------------------------------------------------------------------- + + +@pytest.fixture +def diamond_network() -> Network: + """Symmetric diamond network: A -> B,C -> D with equal capacity paths. + + Topology: + A -> B (cap 60, cost 1) -> D + A -> C (cap 60, cost 1) -> D + + Both paths have cost 2, total capacity 120. + Used for demand placement and basic flow tests. 
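Because this fixture lives in tests/conftest.py, pytest discovers it automatically: any test under tests/ requests it by parameter name with no import. A minimal consumer, assuming the mapping-style `nodes`/`links` containers used throughout these tests:

```python
# Sketch: consuming the shared conftest fixture by parameter name.
from ngraph import Network

def test_uses_shared_diamond(diamond_network: Network) -> None:
    # Four nodes (A-D) and four 60-capacity links, per the docstring above.
    assert len(diamond_network.nodes) == 4
    assert len(diamond_network.links) == 4
```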
+ """ + network = Network() + for node in ["A", "B", "C", "D"]: + network.add_node(Node(node)) + + network.add_link(Link("A", "B", capacity=60.0, cost=1.0)) + network.add_link(Link("A", "C", capacity=60.0, cost=1.0)) + network.add_link(Link("B", "D", capacity=60.0, cost=1.0)) + network.add_link(Link("C", "D", capacity=60.0, cost=1.0)) + + return network + + +def make_asymmetric_diamond( + *, + disable_node_b: bool = False, + disable_link_a_b: bool = False, +) -> Network: + """Factory for asymmetric diamond network with optional disabled elements. + + Topology: + A -> B (cap 5, cost 1) -> D (cap 5, cost 1) [tier 1: cost 2, cap 5] + A -> C (cap 3, cost 2) -> D (cap 3, cost 2) [tier 2: cost 4, cap 3] + + With both paths enabled: max flow = 8 (5 via B + 3 via C) + With B disabled: max flow = 3 (only via C) + With A->B link disabled: max flow = 3 (only via C) + + Args: + disable_node_b: If True, disable node B. + disable_link_a_b: If True, disable the A->B link. + + Returns: + Network with configured topology. + """ + net = Network() + net.add_node(Node("A")) + net.add_node(Node("B", disabled=disable_node_b)) + net.add_node(Node("C")) + net.add_node(Node("D")) + + net.add_link(Link("A", "B", capacity=5.0, cost=1.0, disabled=disable_link_a_b)) + net.add_link(Link("B", "D", capacity=5.0, cost=1.0)) + net.add_link(Link("A", "C", capacity=3.0, cost=2.0)) + net.add_link(Link("C", "D", capacity=3.0, cost=2.0)) + + return net + + +@pytest.fixture +def asymmetric_diamond() -> Network: + """Asymmetric diamond network with different cost tiers. + + Shortcut fixture for make_asymmetric_diamond() with defaults. + """ + return make_asymmetric_diamond() + + +@pytest.fixture +def multi_tier_network() -> Network: + """Multi-tier cost network with large capacity. + + Topology: + A -> B (cap 30, cost 1) -> D (cap 30, cost 1) [tier 1: cost 2] + A -> C (cap 30, cost 2) -> D (cap 30, cost 2) [tier 2: cost 4] + + Both tiers have equal capacity (30), but different costs. + Used for demand placement cost distribution tests. 
+ """ + network = Network() + for node in ["A", "B", "C", "D"]: + network.add_node(Node(node)) + + # Tier 1: cost 2, capacity 30 + network.add_link(Link("A", "B", capacity=30.0, cost=1.0)) + network.add_link(Link("B", "D", capacity=30.0, cost=1.0)) + + # Tier 2: cost 4, capacity 30 + network.add_link(Link("A", "C", capacity=30.0, cost=2.0)) + network.add_link(Link("C", "D", capacity=30.0, cost=2.0)) + + return network diff --git a/tests/dsl/test_dot_notation_conditions.py b/tests/dsl/test_dot_notation_conditions.py index 4071f31..8e1bcc7 100644 --- a/tests/dsl/test_dot_notation_conditions.py +++ b/tests/dsl/test_dot_notation_conditions.py @@ -68,63 +68,61 @@ class TestEvaluateConditionWithDotNotation: def test_equality_nested(self): """Equality operator works with nested attributes.""" attrs = {"hardware": {"vendor": "Acme"}} - cond = Condition(attr="hardware.vendor", operator="==", value="Acme") + cond = Condition(attr="hardware.vendor", op="==", value="Acme") assert evaluate_condition(attrs, cond) is True def test_inequality_nested(self): """Inequality operator works with nested attributes.""" attrs = {"hardware": {"vendor": "Acme"}} - cond = Condition(attr="hardware.vendor", operator="!=", value="Other") + cond = Condition(attr="hardware.vendor", op="!=", value="Other") assert evaluate_condition(attrs, cond) is True def test_numeric_comparison_nested(self): """Numeric comparison works with nested attributes.""" attrs = {"metrics": {"latency": 50}} - cond = Condition(attr="metrics.latency", operator="<", value=100) + cond = Condition(attr="metrics.latency", op="<", value=100) assert evaluate_condition(attrs, cond) is True def test_contains_nested(self): """Contains operator works with nested attributes.""" attrs = {"config": {"tags": ["prod", "web"]}} - cond = Condition(attr="config.tags", operator="contains", value="prod") + cond = Condition(attr="config.tags", op="contains", value="prod") assert evaluate_condition(attrs, cond) is True - def test_any_value_nested_present(self): - """any_value operator with present nested attribute.""" + def test_exists_nested_present(self): + """exists operator with present nested attribute.""" attrs = {"hardware": {"vendor": "Acme"}} - cond = Condition(attr="hardware.vendor", operator="any_value") + cond = Condition(attr="hardware.vendor", op="exists") assert evaluate_condition(attrs, cond) is True - def test_any_value_nested_missing(self): - """any_value operator with missing nested attribute.""" + def test_exists_nested_missing(self): + """exists operator with missing nested attribute.""" attrs = {"hardware": {}} - cond = Condition(attr="hardware.vendor", operator="any_value") + cond = Condition(attr="hardware.vendor", op="exists") assert evaluate_condition(attrs, cond) is False - def test_no_value_nested_missing(self): - """no_value operator with missing nested attribute.""" + def test_not_exists_nested_missing(self): + """not_exists operator with missing nested attribute.""" attrs = {"hardware": {}} - cond = Condition(attr="hardware.vendor", operator="no_value") + cond = Condition(attr="hardware.vendor", op="not_exists") assert evaluate_condition(attrs, cond) is True - def test_no_value_nested_none(self): - """no_value operator with None nested attribute.""" + def test_not_exists_nested_none(self): + """not_exists operator with None nested attribute.""" attrs = {"hardware": {"vendor": None}} - cond = Condition(attr="hardware.vendor", operator="no_value") + cond = Condition(attr="hardware.vendor", op="not_exists") assert evaluate_condition(attrs, 
cond) is True - def test_backward_compatibility_simple(self): - """Simple (non-dotted) paths still work.""" + def test_simple_attribute_path(self): + """Simple (non-dotted) attribute paths work correctly.""" attrs = {"role": "spine", "tier": 2} - cond = Condition(attr="role", operator="==", value="spine") + cond = Condition(attr="role", op="==", value="spine") assert evaluate_condition(attrs, cond) is True def test_in_operator_nested(self): """in operator works with nested attributes.""" attrs = {"location": {"region": "us-west"}} - cond = Condition( - attr="location.region", operator="in", value=["us-west", "us-east"] - ) + cond = Condition(attr="location.region", op="in", value=["us-west", "us-east"]) assert evaluate_condition(attrs, cond) is True @@ -153,7 +151,7 @@ def test_deep_nesting_condition_evaluation(self): """Condition evaluation works with deeply nested paths.""" attrs = {"facility": {"datacenter": {"room": {"rack": {"pdu_zone": "A"}}}}} cond = Condition( - attr="facility.datacenter.room.rack.pdu_zone", operator="==", value="A" + attr="facility.datacenter.room.rack.pdu_zone", op="==", value="A" ) assert evaluate_condition(attrs, cond) is True @@ -175,12 +173,10 @@ def test_deep_nesting_missing_intermediate(self): def test_deep_nesting_with_list_at_leaf(self): """Deeply nested path ending in a list works with contains operator.""" attrs = {"network": {"fabric": {"tier": {"roles": ["spine", "border"]}}}} - cond = Condition( - attr="network.fabric.tier.roles", operator="contains", value="spine" - ) + cond = Condition(attr="network.fabric.tier.roles", op="contains", value="spine") assert evaluate_condition(attrs, cond) is True cond_miss = Condition( - attr="network.fabric.tier.roles", operator="contains", value="leaf" + attr="network.fabric.tier.roles", op="contains", value="leaf" ) assert evaluate_condition(attrs, cond_miss) is False diff --git a/tests/dsl/test_dsl_features_validation.py b/tests/dsl/test_dsl_features_validation.py new file mode 100644 index 0000000..036c98d --- /dev/null +++ b/tests/dsl/test_dsl_features_validation.py @@ -0,0 +1,358 @@ +"""Validation tests for DSL features to be documented in the skill reference. + +These tests verify the behavior of DSL features to ensure documentation accuracy.
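Every test in this new file follows the same shape: build a `Scenario` from an inline YAML string using the current field names, then assert on the resulting model. The skeleton, reduced to essentials (field names taken from the tests below):

```python
# Sketch: the build-then-assert skeleton shared by the tests in this file.
from ngraph.scenario import Scenario

yaml_str = """
network:
  nodes:
    A: {}
    B: {}
  links:
    - {source: A, target: B, capacity: 100, cost: 1}
"""

scenario = Scenario.from_yaml(yaml_str)
net = scenario.network
assert set(net.nodes.keys()) == {"A", "B"}
assert all(link.capacity == 100 for link in net.links.values())
```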
+""" + +from ngraph.scenario import Scenario + + +class TestLinkMatchInLinkRules: + """Validate `link_match` in link_rules - filter by link's own attributes.""" + + def test_link_match_filters_by_capacity(self): + """Only links matching link_match conditions should be updated.""" + yaml_str = """ +network: + nodes: + A: {} + B: {} + links: + - {source: A, target: B, capacity: 100, cost: 1} + - {source: A, target: B, capacity: 500, cost: 1} + link_rules: + - source: A + target: B + link_match: + conditions: + - {attr: capacity, op: ">=", value: 400} + cost: 99 +""" + scenario = Scenario.from_yaml(yaml_str) + net = scenario.network + + # Check link costs + costs = [link.cost for link in net.links.values()] + assert sorted(costs) == [1, 99], f"Expected [1, 99], got {sorted(costs)}" + + # Verify only the high-capacity link was updated + for link in net.links.values(): + if link.capacity >= 400: + assert link.cost == 99, "High-capacity link should have cost 99" + else: + assert link.cost == 1, "Low-capacity link should have cost 1" + + def test_link_match_with_multiple_conditions(self): + """link_match with multiple conditions using AND logic.""" + yaml_str = """ +network: + nodes: + A: {} + B: {} + links: + - {source: A, target: B, capacity: 100, cost: 1, attrs: {type: fiber}} + - {source: A, target: B, capacity: 500, cost: 1, attrs: {type: copper}} + - {source: A, target: B, capacity: 500, cost: 1, attrs: {type: fiber}} + link_rules: + - source: A + target: B + link_match: + logic: and + conditions: + - {attr: capacity, op: ">=", value: 400} + - {attr: type, op: "==", value: fiber} + cost: 99 +""" + scenario = Scenario.from_yaml(yaml_str) + net = scenario.network + + # Only the high-capacity fiber link should have cost 99 + updated_count = sum(1 for link in net.links.values() if link.cost == 99) + assert updated_count == 1, f"Expected 1 updated link, got {updated_count}" + + +class TestMatchInNodeRules: + """Validate `match` in node_rules - filter nodes by attribute conditions.""" + + def test_match_filters_nodes(self): + """Only nodes matching conditions should be updated.""" + yaml_str = """ +network: + nodes: + srv1: {attrs: {role: compute, tier: 1}} + srv2: {attrs: {role: compute, tier: 2}} + srv3: {attrs: {role: storage, tier: 1}} + node_rules: + - path: ".*" + match: + logic: and + conditions: + - {attr: role, op: "==", value: compute} + - {attr: tier, op: ">=", value: 2} + disabled: true +""" + scenario = Scenario.from_yaml(yaml_str) + net = scenario.network + + # Only srv2 should be disabled + assert net.nodes["srv2"].disabled is True, "srv2 should be disabled" + assert net.nodes["srv1"].disabled is False, "srv1 should not be disabled" + assert net.nodes["srv3"].disabled is False, "srv3 should not be disabled" + + def test_match_with_or_logic(self): + """match with OR logic should update any matching node.""" + yaml_str = """ +network: + nodes: + srv1: {attrs: {role: compute, tier: 1}} + srv2: {attrs: {role: compute, tier: 2}} + srv3: {attrs: {role: storage, tier: 1}} + node_rules: + - path: ".*" + match: + logic: or + conditions: + - {attr: role, op: "==", value: storage} + - {attr: tier, op: ">=", value: 2} + attrs: + tagged: true +""" + scenario = Scenario.from_yaml(yaml_str) + net = scenario.network + + # srv2 (tier 2) and srv3 (storage) should be tagged + assert net.nodes["srv2"].attrs.get("tagged") is True + assert net.nodes["srv3"].attrs.get("tagged") is True + assert net.nodes["srv1"].attrs.get("tagged") is not True + + +class TestExpandInRules: + """Validate `expand` in 
node/link rules - variable expansion.""" + + def test_expand_in_node_rules(self): + """Variable expansion in node rules.""" + yaml_str = """ +network: + nodes: + dc1_srv1: {} + dc2_srv1: {} + dc3_srv1: {} + node_rules: + - path: "${dc}_srv1" + expand: + vars: + dc: [dc1, dc2] + mode: cartesian + attrs: + tagged: true +""" + scenario = Scenario.from_yaml(yaml_str) + net = scenario.network + + # dc1_srv1 and dc2_srv1 should be tagged, dc3_srv1 should not + assert net.nodes["dc1_srv1"].attrs.get("tagged") is True + assert net.nodes["dc2_srv1"].attrs.get("tagged") is True + assert net.nodes["dc3_srv1"].attrs.get("tagged") is not True + + def test_expand_in_link_rules(self): + """Variable expansion in link rules.""" + yaml_str = """ +network: + nodes: + dc1_srv: {} + dc2_srv: {} + dc3_srv: {} + links: + - {source: dc1_srv, target: dc2_srv, capacity: 100} + - {source: dc2_srv, target: dc3_srv, capacity: 100} + link_rules: + - source: "${src}_srv" + target: "${tgt}_srv" + expand: + vars: + src: [dc1] + tgt: [dc2] + mode: cartesian + capacity: 200 +""" + scenario = Scenario.from_yaml(yaml_str) + net = scenario.network + + for link in net.links.values(): + if link.source == "dc1_srv" and link.target == "dc2_srv": + assert link.capacity == 200, "dc1->dc2 link should have capacity 200" + else: + assert link.capacity == 100, "Other links should have capacity 100" + + +class TestNestedInlineNodes: + """Validate nested inline `nodes` - hierarchy without blueprints.""" + + def test_nested_nodes_creates_hierarchy(self): + """Nested nodes field creates hierarchical structure.""" + yaml_str = """ +network: + nodes: + datacenter: + nodes: + rack1: + count: 2 + template: "srv{n}" + rack2: + count: 2 + template: "srv{n}" +""" + scenario = Scenario.from_yaml(yaml_str) + net = scenario.network + + expected_nodes = { + "datacenter/rack1/srv1", + "datacenter/rack1/srv2", + "datacenter/rack2/srv1", + "datacenter/rack2/srv2", + } + actual_nodes = set(net.nodes.keys()) + + assert expected_nodes == actual_nodes, ( + f"Expected {expected_nodes}, got {actual_nodes}" + ) + + def test_nested_nodes_inherits_attrs(self): + """Nested nodes inherit parent attributes.""" + yaml_str = """ +network: + nodes: + datacenter: + attrs: + region: west + nodes: + rack1: + count: 1 + template: "srv{n}" + attrs: + role: compute +""" + scenario = Scenario.from_yaml(yaml_str) + net = scenario.network + + node = net.nodes["datacenter/rack1/srv1"] + assert node.attrs.get("region") == "west", "Should inherit parent attrs" + assert node.attrs.get("role") == "compute", "Should have own attrs" + + +class TestPathInGenerateBlocks: + """Validate `path` in generate blocks - narrow entities before grouping.""" + + def test_path_filters_nodes_in_generate(self): + """path filter narrows nodes before generating risk groups.""" + yaml_str = """ +network: + nodes: + prod_srv1: {attrs: {env: production}} + prod_srv2: {attrs: {env: production}} + dev_srv1: {attrs: {env: development}} + +risk_groups: + - generate: + scope: node + path: "^prod_.*" + group_by: env + name: "Env_${value}" +""" + scenario = Scenario.from_yaml(yaml_str) + net = scenario.network + + # Only Env_production should be created (not Env_development) + assert "Env_production" in net.risk_groups, "Env_production should exist" + assert "Env_development" not in net.risk_groups, ( + "Env_development should not exist" + ) + + # Check membership + assert "Env_production" in net.nodes["prod_srv1"].risk_groups + assert "Env_production" in net.nodes["prod_srv2"].risk_groups + + def 
test_path_filters_links_in_generate(self): + """path filter works on links in generate blocks.""" + yaml_str = """ +network: + nodes: + A: {} + B: {} + C: {} + links: + - {source: A, target: B, capacity: 100, attrs: {type: backbone}} + - {source: B, target: C, capacity: 100, attrs: {type: access}} + +risk_groups: + - generate: + scope: link + path: ".*A.*B.*" + group_by: type + name: "LinkType_${value}" +""" + scenario = Scenario.from_yaml(yaml_str) + net = scenario.network + + # Only LinkType_backbone should exist (A-B link) + assert "LinkType_backbone" in net.risk_groups + assert "LinkType_access" not in net.risk_groups + + +class TestInlineFlowPolicyObjects: + """Validate inline flow_policy objects - custom policy configs.""" + + def test_flow_policy_preset_string(self): + """Preset string flow_policy should work.""" + yaml_str = """ +network: + nodes: + A: {} + B: {} + links: + - {source: A, target: B, capacity: 100} + +demands: + test: + - source: A + target: B + volume: 100 + flow_policy: SHORTEST_PATHS_ECMP +""" + scenario = Scenario.from_yaml(yaml_str) + demands = scenario.demand_set.sets.get("test", []) + assert len(demands) == 1 + # flow_policy should be a FlowPolicyPreset enum + from ngraph.model.flow.policy_config import FlowPolicyPreset + + assert demands[0].flow_policy == FlowPolicyPreset.SHORTEST_PATHS_ECMP + + def test_flow_policy_inline_object_preserved(self): + """Inline object flow_policy should be preserved (not converted to preset).""" + yaml_str = """ +network: + nodes: + A: {} + B: {} + links: + - {source: A, target: B, capacity: 100} + +demands: + test: + - source: A + target: B + volume: 100 + flow_policy: + path_alg: SPF + flow_placement: PROPORTIONAL +""" + scenario = Scenario.from_yaml(yaml_str) + demands = scenario.demand_set.sets.get("test", []) + assert len(demands) == 1 + # Inline object should be preserved as dict + fp = demands[0].flow_policy + assert isinstance(fp, dict), f"Expected dict, got {type(fp)}" + assert fp.get("path_alg") == "SPF" + assert fp.get("flow_placement") == "PROPORTIONAL" + + +# Run with: pytest tests/dsl/test_dsl_features_validation.py -v diff --git a/tests/dsl/test_examples.py b/tests/dsl/test_examples.py index c52749c..5d6e97c 100644 --- a/tests/dsl/test_examples.py +++ b/tests/dsl/test_examples.py @@ -21,12 +21,11 @@ def test_basic_network_example(): links: - source: SEA target: SFO - link_params: - capacity: 200 - cost: 6846 - attrs: - distance_km: 1369.13 - media_type: "fiber" + capacity: 200 + cost: 6846 + attrs: + distance_km: 1369.13 + media_type: "fiber" """ scenario = Scenario.from_yaml(yaml_content) @@ -41,24 +40,23 @@ def test_groups_example(): """Test network groups with adjacency patterns.""" yaml_content = """ network: - groups: + nodes: direct_group_A: - node_count: 2 - name_template: "server-{node_num}" + count: 2 + template: "server-{n}" attrs: os: "linux" direct_group_B: - node_count: 2 - name_template: "switch-{node_num}" + count: 2 + template: "switch-{n}" attrs: type: "switch" - adjacency: + links: - source: /direct_group_A target: /direct_group_B pattern: "mesh" - link_params: - capacity: 100 - cost: 10 + capacity: 100 + cost: 10 """ scenario = Scenario.from_yaml(yaml_content) @@ -72,48 +70,47 @@ def test_adjacency_selector_match_filters_nodes(): """Adjacency selectors with match should filter nodes by attributes.""" yaml_content = """ network: - groups: + nodes: servers: - node_count: 4 - name_template: "srv-{node_num}" + count: 4 + template: "srv-{n}" attrs: role: "compute" rack: "rack-1" servers_b: - 
node_count: 2 - name_template: "srvb-{node_num}" + count: 2 + template: "srvb-{n}" attrs: role: "compute" rack: "rack-9" switches: - node_count: 2 - name_template: "sw-{node_num}" + count: 2 + template: "sw-{n}" attrs: tier: "spine" - adjacency: + links: - source: path: "/servers" match: logic: "and" conditions: - attr: "role" - operator: "==" + op: "==" value: "compute" - attr: "rack" - operator: "!=" + op: "!=" value: "rack-9" target: path: "/switches" match: conditions: - attr: "tier" - operator: "==" + op: "==" value: "spine" pattern: "mesh" - link_params: - capacity: 10 - cost: 1 + capacity: 10 + cost: 1 """ scenario = Scenario.from_yaml(yaml_content) @@ -127,15 +124,15 @@ def test_bracket_expansion(): yaml_content = """ blueprints: simple_pod: - groups: + nodes: switches: - node_count: 2 - name_template: "sw-{node_num}" + count: 2 + template: "sw-{n}" network: - groups: + nodes: pod[1-2]: - use_blueprint: simple_pod + blueprint: simple_pod """ scenario = Scenario.from_yaml(yaml_content) @@ -148,29 +145,28 @@ def test_blueprint_example(): yaml_content = """ blueprints: my_blueprint_name: - groups: + nodes: group_name_1: - node_count: 2 - name_template: "prefix-{node_num}" + count: 2 + template: "prefix-{n}" attrs: hw_type: "router_model_X" role: "leaf" risk_groups: ["RG1", "RG2"] group_name_2: - node_count: 2 - name_template: "spine-{node_num}" - adjacency: + count: 2 + template: "spine-{n}" + links: - source: /group_name_1 target: /group_name_2 pattern: "mesh" - link_params: - capacity: 100 - cost: 10 + capacity: 100 + cost: 10 network: - groups: + nodes: instance_of_bp: - use_blueprint: my_blueprint_name + blueprint: my_blueprint_name attrs: location: "rack1" @@ -256,7 +252,7 @@ def test_risk_groups_example(): assert len(rack1.children) == 2 -def test_traffic_matrix_set_example(): +def test_demand_set_example(): """Test traffic matrix set definition.""" yaml_content = """ network: @@ -274,11 +270,11 @@ def test_traffic_matrix_set_example(): attrs: role: "server" -traffic_matrix_set: +demands: default: - source: "source.*" - sink: "sink.*" - demand: 100 + target: "sink.*" + volume: 100 mode: "combine" priority: 1 attrs: @@ -286,12 +282,12 @@ def test_traffic_matrix_set_example(): """ scenario = Scenario.from_yaml(yaml_content) - default_demands = scenario.traffic_matrix_set.get_default_matrix() + default_demands = scenario.demand_set.get_default_set() assert len(default_demands) == 1 demand = default_demands[0] assert demand.source == "source.*" - assert demand.sink == "sink.*" - assert demand.demand == 100 + assert demand.target == "sink.*" + assert demand.volume == 100 assert demand.mode == "combine" @@ -307,22 +303,23 @@ def test_failure_policy_example(): attrs: role: "leaf" -failure_policy_set: +failures: default: - fail_risk_groups: true - fail_risk_group_children: false + expand_groups: true + expand_children: false attrs: custom_key: "value" modes: - weight: 1.0 rules: - - entity_scope: "node" - conditions: - - attr: "role" - operator: "==" - value: "spine" - logic: "and" - rule_type: "all" + - scope: "node" + match: + logic: "and" + conditions: + - attr: "role" + op: "==" + value: "spine" + mode: "all" """ scenario = Scenario.from_yaml(yaml_content) @@ -330,13 +327,13 @@ def test_failure_policy_example(): policies = scenario.failure_policy_set.get_all_policies() assert len(policies) > 0 default_policy = scenario.failure_policy_set.get_policy("default") - assert default_policy.fail_risk_groups - assert not default_policy.fail_risk_group_children + assert 
default_policy.expand_groups + assert not default_policy.expand_children assert len(default_policy.modes) == 1 mode = default_policy.modes[0] assert len(mode.rules) == 1 rule = mode.rules[0] - assert rule.entity_scope == "node" + assert rule.scope == "node" assert len(rule.conditions) == 1 @@ -349,7 +346,7 @@ def test_workflow_example(): node2: {} workflow: - - step_type: BuildGraph + - type: BuildGraph """ scenario = Scenario.from_yaml(yaml_content) @@ -369,17 +366,17 @@ def test_node_overrides_example(): yaml_content = """ blueprints: test_bp: - groups: + nodes: switches: - node_count: 3 - name_template: "switch-{node_num}" + count: 3 + template: "switch-{n}" network: - groups: + nodes: my_clos1: - use_blueprint: test_bp + blueprint: test_bp - node_overrides: + node_rules: - path: "^my_clos1/switches/switch-(1|3)$" disabled: true attrs: @@ -402,28 +399,26 @@ def test_link_overrides_example(): """Test link overrides functionality.""" yaml_content = """ network: - groups: + nodes: group1: - node_count: 2 - name_template: "node-{node_num}" + count: 2 + template: "node-{n}" group2: - node_count: 2 - name_template: "node-{node_num}" + count: 2 + template: "node-{n}" - adjacency: + links: - source: /group1 target: /group2 pattern: "mesh" - link_params: - capacity: 100 - cost: 10 + capacity: 100 + cost: 10 - link_overrides: + link_rules: - source: "^group1/node-1$" target: "^group2/node-1$" - link_params: - capacity: 200 - cost: 5 + capacity: 200 + cost: 5 """ scenario = Scenario.from_yaml(yaml_content) @@ -445,30 +440,30 @@ def test_variable_expansion(): yaml_content = """ blueprints: test_expansion: - groups: + nodes: plane1_rack: - node_count: 2 - name_template: "rack-{node_num}" + count: 2 + template: "rack-{n}" plane2_rack: - node_count: 2 - name_template: "rack-{node_num}" + count: 2 + template: "rack-{n}" spine: - node_count: 2 - name_template: "spine-{node_num}" - adjacency: + count: 2 + template: "spine-{n}" + links: - source: "plane${p}_rack" target: "spine" - expand_vars: - p: [1, 2] - expansion_mode: "cartesian" + expand: + vars: + p: [1, 2] + mode: "cartesian" pattern: "mesh" - link_params: - capacity: 100 + capacity: 100 network: - groups: + nodes: test_instance: - use_blueprint: test_expansion + blueprint: test_expansion """ scenario = Scenario.from_yaml(yaml_content) @@ -486,9 +481,9 @@ def test_unknown_blueprint_raises(): """Using an unknown blueprint should raise ValueError with a clear message.""" yaml_content = """ network: - groups: + nodes: use_missing: - use_blueprint: non_existent + blueprint: non_existent """ with pytest.raises(ValueError) as exc: @@ -500,12 +495,12 @@ def test_one_to_one_mismatch_raises(): """one_to_one requires sizes with a multiple factor; mismatch should error.""" yaml_content = """ network: - groups: + nodes: A: - node_count: 3 + count: 3 B: - node_count: 2 - adjacency: + count: 2 + links: - source: /A target: /B pattern: one_to_one @@ -517,25 +512,27 @@ def test_one_to_one_mismatch_raises(): def test_unknown_adjacency_pattern_raises(): - """Unknown adjacency pattern should raise ValueError.""" + """Unknown link pattern should raise ValidationError.""" + import jsonschema + yaml_content = """ network: nodes: N1: {} N2: {} - adjacency: + links: - source: /N1 target: /N2 pattern: non_existent_pattern """ - with pytest.raises(ValueError) as exc: + with pytest.raises(jsonschema.ValidationError) as exc: Scenario.from_yaml(yaml_content) - assert "Unknown adjacency pattern" in str(exc.value) + assert "is not one of ['mesh', 'one_to_one']" in 
str(exc.value) -def test_direct_link_same_node_raises(): - """A direct link with identical source and target should raise ValueError.""" +def test_direct_link_same_node_skipped(): + """A direct link with identical source and target is silently skipped (no self-loops).""" yaml_content = """ network: nodes: @@ -545,28 +542,29 @@ def test_direct_link_same_node_raises(): target: X """ - with pytest.raises(ValueError) as exc: - Scenario.from_yaml(yaml_content) - assert "Link cannot have the same source and target" in str(exc.value) + # Self-loop links are silently skipped by the mesh pattern + scenario = Scenario.from_yaml(yaml_content) + assert "X" in scenario.network.nodes + assert len(scenario.network.links) == 0 # No link created def test_nested_parameter_override_in_attrs(): - """Nested parameter override via parameters should modify node attrs.""" + """Nested parameter override via params should modify node attrs.""" yaml_content = """ blueprints: bp1: - groups: + nodes: leaf: - node_count: 1 + count: 1 attrs: some_field: nested_key: 111 network: - groups: + nodes: Main: - use_blueprint: bp1 - parameters: + blueprint: bp1 + params: leaf.attrs.some_field.nested_key: 999 """ @@ -582,20 +580,21 @@ def test_zip_variable_mismatch_raises(): """Zip expansion requires all lists same length; mismatch should raise.""" yaml_content = """ network: - groups: + nodes: RackA: - node_count: 1 + count: 1 RackB: - node_count: 1 + count: 1 RackC: - node_count: 1 - adjacency: + count: 1 + links: - source: /Rack${rack_id} target: /Rack${other_rack_id} - expand_vars: - rack_id: [A, B] - other_rack_id: [C, A, B] - expansion_mode: zip + expand: + vars: + rack_id: [A, B] + other_rack_id: [C, A, B] + mode: zip """ with pytest.raises(ValueError) as exc: @@ -603,8 +602,8 @@ def test_zip_variable_mismatch_raises(): assert "zip expansion requires equal-length lists" in str(exc.value) -def test_direct_link_unknown_node_raises(): - """Referencing an unknown node in a direct link should raise ValueError.""" +def test_direct_link_unknown_node_skipped(): + """Referencing an unknown node in a direct link creates no links (pattern finds no target).""" yaml_content = """ network: nodes: @@ -614,9 +613,10 @@ def test_direct_link_unknown_node_raises(): target: UnknownNode """ - with pytest.raises(ValueError) as exc: - Scenario.from_yaml(yaml_content) - assert "Link references unknown node(s)" in str(exc.value) + # Link with unknown target node is silently skipped (no matching target) + scenario = Scenario.from_yaml(yaml_content) + assert "KnownNode" in scenario.network.nodes + assert len(scenario.network.links) == 0 # No link created def test_group_by_selector_inside_blueprint(): @@ -630,40 +630,39 @@ def test_group_by_selector_inside_blueprint(): yaml_content = """ blueprints: bp_group: - groups: + nodes: leaf: - node_count: 2 - name_template: "leaf-{node_num}" + count: 2 + template: "leaf-{n}" attrs: role: "leaf" spine: - node_count: 1 - name_template: "spine-{node_num}" + count: 1 + template: "spine-{n}" attrs: role: "spine" - adjacency: + links: - source: group_by: "role" match: conditions: - attr: "role" - operator: "==" + op: "==" value: "leaf" target: group_by: "role" match: conditions: - attr: "role" - operator: "==" + op: "==" value: "spine" pattern: "mesh" - link_params: - capacity: 10 + capacity: 10 network: - groups: + nodes: pod1: - use_blueprint: bp_group + blueprint: bp_group """ scenario = Scenario.from_yaml(yaml_content) @@ -682,33 +681,33 @@ def test_group_by_with_variable_expansion(): yaml_content = """ 
blueprints: bp_group_vars: - groups: + nodes: leaf: - node_count: 2 - name_template: "leaf-{node_num}" + count: 2 + template: "leaf-{n}" attrs: src_role: "leaf" spine: - node_count: 1 - name_template: "spine-{node_num}" + count: 1 + template: "spine-{n}" attrs: dst_role: "spine" - adjacency: + links: - source: group_by: "${src_attr}" target: group_by: "${dst_attr}" - expand_vars: - src_attr: ["src_role"] - dst_attr: ["dst_role"] + expand: + vars: + src_attr: ["src_role"] + dst_attr: ["dst_role"] pattern: "mesh" - link_params: - capacity: 10 + capacity: 10 network: - groups: + nodes: pod1: - use_blueprint: bp_group_vars + blueprint: bp_group_vars """ scenario = Scenario.from_yaml(yaml_content) @@ -753,7 +752,7 @@ def test_direct_link_missing_required_keys_raises(): A: {} B: {} links: - - link_params: {capacity: 1} + - capacity: 1 """ with pytest.raises(ValueError) as exc: diff --git a/tests/dsl/test_expansion.py b/tests/dsl/test_expansion.py index a8bd459..209794d 100644 --- a/tests/dsl/test_expansion.py +++ b/tests/dsl/test_expansion.py @@ -28,19 +28,19 @@ class TestExpansionSpec: def test_default_values(self) -> None: """Default ExpansionSpec has empty vars and cartesian mode.""" spec = ExpansionSpec() - assert spec.expand_vars == {} - assert spec.expansion_mode == "cartesian" + assert spec.vars == {} + assert spec.mode == "cartesian" def test_is_empty(self) -> None: """is_empty returns True for empty expand_vars.""" assert ExpansionSpec().is_empty() is True - assert ExpansionSpec(expand_vars={"x": [1]}).is_empty() is False + assert ExpansionSpec(vars={"x": [1]}).is_empty() is False def test_custom_values(self) -> None: """Custom values are preserved.""" - spec = ExpansionSpec(expand_vars={"dc": [1, 2]}, expansion_mode="zip") - assert spec.expand_vars == {"dc": [1, 2]} - assert spec.expansion_mode == "zip" + spec = ExpansionSpec(vars={"dc": [1, 2]}, mode="zip") + assert spec.vars == {"dc": [1, 2]} + assert spec.mode == "zip" # ────────────────────────────────────────────────────────────────────────────── @@ -102,7 +102,7 @@ class TestExpandTemplatesCartesian: def test_single_var_expands(self) -> None: """Single variable expands to multiple results.""" - spec = ExpansionSpec(expand_vars={"dc": [1, 2, 3]}) + spec = ExpansionSpec(vars={"dc": [1, 2, 3]}) results = list(expand_templates({"path": "dc${dc}"}, spec)) assert len(results) == 3 @@ -112,7 +112,7 @@ def test_single_var_expands(self) -> None: def test_multiple_vars_cartesian(self) -> None: """Multiple variables create cartesian product.""" - spec = ExpansionSpec(expand_vars={"dc": [1, 2], "rack": ["a", "b"]}) + spec = ExpansionSpec(vars={"dc": [1, 2], "rack": ["a", "b"]}) results = list(expand_templates({"path": "dc${dc}_rack${rack}"}, spec)) assert len(results) == 4 # 2 * 2 @@ -124,14 +124,16 @@ def test_multiple_vars_cartesian(self) -> None: def test_multiple_templates(self) -> None: """Multiple template fields are all expanded.""" - spec = ExpansionSpec(expand_vars={"dc": [1, 2]}) + spec = ExpansionSpec(vars={"dc": [1, 2]}) results = list( - expand_templates({"source": "dc${dc}/leaf", "sink": "dc${dc}/spine"}, spec) + expand_templates( + {"source": "dc${dc}/leaf", "target": "dc${dc}/spine"}, spec + ) ) assert len(results) == 2 - assert results[0] == {"source": "dc1/leaf", "sink": "dc1/spine"} - assert results[1] == {"source": "dc2/leaf", "sink": "dc2/spine"} + assert results[0] == {"source": "dc1/leaf", "target": "dc1/spine"} + assert results[1] == {"source": "dc2/leaf", "target": "dc2/spine"} def 
test_empty_vars_yields_original(self) -> None: """Empty expand_vars yields original template.""" @@ -147,9 +149,7 @@ class TestExpandTemplatesZip: def test_zip_pairs_by_index(self) -> None: """Zip mode pairs variables by index.""" - spec = ExpansionSpec( - expand_vars={"src": ["a", "b"], "dst": ["x", "y"]}, expansion_mode="zip" - ) + spec = ExpansionSpec(vars={"src": ["a", "b"], "dst": ["x", "y"]}, mode="zip") results = list(expand_templates({"path": "${src}->${dst}"}, spec)) assert len(results) == 2 @@ -159,8 +159,8 @@ def test_zip_pairs_by_index(self) -> None: def test_zip_mismatched_lengths_raises(self) -> None: """Zip mode with mismatched list lengths raises.""" spec = ExpansionSpec( - expand_vars={"src": ["a", "b"], "dst": ["x", "y", "z"]}, - expansion_mode="zip", + vars={"src": ["a", "b"], "dst": ["x", "y", "z"]}, + mode="zip", ) with pytest.raises(ValueError, match="equal-length"): list(expand_templates({"path": "${src}->${dst}"}, spec)) @@ -173,7 +173,7 @@ def test_large_expansion_raises(self) -> None: """Expansion exceeding limit raises.""" # Create vars that would produce > 10,000 combinations spec = ExpansionSpec( - expand_vars={ + vars={ "a": list(range(50)), "b": list(range(50)), "c": list(range(50)), diff --git a/tests/dsl/test_parse_helpers.py b/tests/dsl/test_parse_helpers.py index 8b71a51..b646433 100644 --- a/tests/dsl/test_parse_helpers.py +++ b/tests/dsl/test_parse_helpers.py @@ -3,8 +3,7 @@ import pytest from ngraph.dsl.blueprints.parser import ( - check_adjacency_keys, - check_link_params, + check_link_keys, check_no_extra_keys, expand_name_patterns, join_paths, @@ -40,51 +39,34 @@ def test_check_no_extra_keys_allows_only_expected() -> None: assert "Unrecognized key(s) in ctx" in str(exc.value) -def test_check_adjacency_keys_valid_and_missing_required() -> None: - # Valid - check_adjacency_keys( +def test_check_link_keys_valid_and_missing_required() -> None: + """Test check_link_keys with valid and invalid link definitions.""" + # Valid with flat properties + check_link_keys( { "source": "A", "target": "B", "pattern": "mesh", - "link_count": 1, - "link_params": {}, + "count": 1, + "capacity": 100, + "cost": 10, }, - context="top-level adjacency", + context="top-level link", ) # Missing required keys with pytest.raises(ValueError) as exc: - check_adjacency_keys({"pattern": "mesh"}, context="adj") + check_link_keys({"pattern": "mesh"}, context="link") assert "must have 'source' and 'target'" in str(exc.value) # Extra key with pytest.raises(ValueError) as exc2: - check_adjacency_keys( + check_link_keys( { "source": "A", "target": "B", "unexpected": True, }, - context="adj", + context="link", ) - assert "Unrecognized key(s) in adj" in str(exc2.value) - - -def test_check_link_params_valid_and_extra_key() -> None: - # Valid set of keys - check_link_params( - { - "capacity": 1, - "cost": 2, - "disabled": False, - "risk_groups": ["RG"], - "attrs": {"k": "v"}, - }, - context="ctx", - ) - - # Extra key should raise - with pytest.raises(ValueError) as exc: - check_link_params({"capacity": 1, "extra": 0}, context="ctx") - assert "Unrecognized link_params key(s) in ctx" in str(exc.value) + assert "Unrecognized key(s) in link" in str(exc2.value) diff --git a/tests/dsl/test_risk_group_expansion.py b/tests/dsl/test_risk_group_expansion.py index 0a92b71..aaa3903 100644 --- a/tests/dsl/test_risk_group_expansion.py +++ b/tests/dsl/test_risk_group_expansion.py @@ -185,9 +185,9 @@ def test_group_risk_groups_expansion(self) -> None: """Group risk_groups array expands and inherits to all 
nodes.""" yaml_content = """ network: - groups: + nodes: rack_DC1_R1: - node_count: 2 + count: 2 risk_groups: ["CoolingZone_DC1_R1_CZ[A,B]"] risk_groups: @@ -202,20 +202,19 @@ def test_group_risk_groups_expansion(self) -> None: } def test_adjacency_link_risk_groups_expansion(self) -> None: - """Adjacency link_params risk_groups expands for conduit groups.""" + """Link risk_groups expands for conduit groups.""" yaml_content = """ network: - groups: + nodes: leaf: - node_count: 2 + count: 2 spine: - node_count: 2 - adjacency: + count: 2 + links: - source: /leaf target: /spine pattern: mesh - link_params: - risk_groups: ["Conduit_DC1_C[1-2]"] + risk_groups: ["Conduit_DC1_C[1-2]"] risk_groups: - name: "Conduit_DC1_C[1-2]" @@ -234,8 +233,7 @@ def test_direct_link_risk_groups_expansion(self) -> None: links: - source: NYC target: CHI - link_params: - risk_groups: ["FiberPair_NYC_CHI_FP[01,02,03]"] + risk_groups: ["FiberPair_NYC_CHI_FP[01,02,03]"] risk_groups: - name: "FiberPair_NYC_CHI_FP[01,02,03]" @@ -253,10 +251,10 @@ def test_node_override_risk_groups_expansion(self) -> None: """Node override risk_groups expands for building groups.""" yaml_content = """ network: - groups: + nodes: routers: - node_count: 2 - node_overrides: + count: 2 + node_rules: - path: routers risk_groups: ["Building_DC[1-2]"] @@ -272,20 +270,19 @@ def test_link_override_risk_groups_expansion(self) -> None: """Link override risk_groups expands for path groups.""" yaml_content = """ network: - groups: + nodes: leaf: - node_count: 2 + count: 2 spine: - node_count: 1 - adjacency: + count: 1 + links: - source: leaf target: spine pattern: mesh - link_overrides: + link_rules: - source: leaf target: spine - link_params: - risk_groups: ["Path_DC1_P[1-3]"] + risk_groups: ["Path_DC1_P[1-3]"] risk_groups: - name: "Path_DC1_P[1-3]" @@ -356,15 +353,15 @@ def test_inherited_plus_own_risk_groups(self) -> None: yaml_content = """ blueprints: rack: - groups: + nodes: servers: - node_count: 2 + count: 2 risk_groups: ["CoolingZone_CZ[A,B]"] network: - groups: + nodes: dc1_rack1: - use_blueprint: rack + blueprint: rack risk_groups: ["Building_DC1"] risk_groups: @@ -382,19 +379,19 @@ def test_inherited_plus_own_risk_groups(self) -> None: } def test_blueprint_risk_groups_expansion(self) -> None: - """Risk groups in blueprint groups expand correctly.""" + """Risk groups in blueprint nodes expand correctly.""" yaml_content = """ blueprints: fabric: - groups: + nodes: leaf: - node_count: 2 + count: 2 risk_groups: ["PowerZone_PZ[A,B]"] network: - groups: + nodes: dc1_fabric: - use_blueprint: fabric + blueprint: fabric risk_groups: - name: "PowerZone_PZ[A,B]" @@ -408,9 +405,9 @@ def test_definition_and_membership_consistency(self) -> None: """Expanded definitions and memberships reference same groups.""" yaml_content = """ network: - groups: + nodes: routers: - node_count: 3 + count: 3 risk_groups: ["Conduit_NYC_CHI_C[1-3]"] risk_groups: diff --git a/tests/dsl/test_rule_selectors.py b/tests/dsl/test_rule_selectors.py new file mode 100644 index 0000000..5d994e0 --- /dev/null +++ b/tests/dsl/test_rule_selectors.py @@ -0,0 +1,376 @@ +"""Tests for enhanced rule selector support in link_rules and node_rules. 
+ +Tests that: +- link_rules supports full selectors (path + match) for source/target +- link_rules supports link_match for filtering by link attributes +- node_rules supports match conditions for filtering by node attributes +""" + +import pytest + +from ngraph.dsl.blueprints.expand import ( + _process_link_rules, + _process_node_rules, + expand_network_dsl, +) +from ngraph.model.network import Link, Network, Node + +# ────────────────────────────────────────────────────────────────────────────── +# Fixtures +# ────────────────────────────────────────────────────────────────────────────── + + +@pytest.fixture +def network_with_roles() -> Network: + """Create a network with nodes having role attributes and links with costs.""" + net = Network() + + # Spine nodes + net.add_node(Node("spine_1", attrs={"role": "spine", "tier": 2})) + net.add_node(Node("spine_2", attrs={"role": "spine", "tier": 2})) + + # Leaf nodes + net.add_node(Node("leaf_1", attrs={"role": "leaf", "tier": 1})) + net.add_node(Node("leaf_2", attrs={"role": "leaf", "tier": 1})) + net.add_node(Node("leaf_3", attrs={"role": "leaf", "tier": 1})) + + # Links with varying capacities + net.add_link(Link(source="spine_1", target="leaf_1", capacity=100, cost=1)) + net.add_link(Link(source="spine_1", target="leaf_2", capacity=50, cost=2)) + net.add_link(Link(source="spine_1", target="leaf_3", capacity=100, cost=1)) + net.add_link(Link(source="spine_2", target="leaf_1", capacity=100, cost=1)) + net.add_link(Link(source="spine_2", target="leaf_2", capacity=100, cost=1)) + net.add_link(Link(source="spine_2", target="leaf_3", capacity=50, cost=2)) + + return net + + +# ────────────────────────────────────────────────────────────────────────────── +# link_rules Full Selector Tests +# ────────────────────────────────────────────────────────────────────────────── + + +class TestLinkRulesFullSelectors: + """Tests for full selector support in link_rules source/target.""" + + def test_link_rules_string_selector_still_works( + self, network_with_roles: Network + ) -> None: + """String selectors in link_rules work as expected.""" + scenario = { + "network": { + "nodes": {}, + "links": [], + "link_rules": [ + { + "source": "spine_1", + "target": "leaf_.*", + "capacity": 200, + } + ], + } + } + + # Apply rules to existing network + _process_link_rules(network_with_roles, scenario["network"]) + + # Links from spine_1 should have updated capacity + spine1_links = [ + link + for link in network_with_roles.links.values() + if link.source == "spine_1" + ] + assert all(link.capacity == 200 for link in spine1_links) + + # Links from spine_2 should be unchanged + spine2_links = [ + link + for link in network_with_roles.links.values() + if link.source == "spine_2" + ] + assert any(link.capacity == 100 for link in spine2_links) + + def test_link_rules_dict_selector_with_match( + self, network_with_roles: Network + ) -> None: + """Dict selectors with match conditions work in link_rules.""" + scenario = { + "network": { + "link_rules": [ + { + "source": { + "path": ".*", + "match": { + "conditions": [ + {"attr": "role", "op": "==", "value": "spine"} + ] + }, + }, + "target": { + "path": ".*", + "match": { + "conditions": [ + {"attr": "role", "op": "==", "value": "leaf"} + ] + }, + }, + "attrs": {"tagged": True}, + } + ], + } + } + + _process_link_rules(network_with_roles, scenario["network"]) + + # All spine->leaf links should have the 'tagged' attr + for link in network_with_roles.links.values(): + src_node = network_with_roles.nodes[link.source] + tgt_node = 
network_with_roles.nodes[link.target] + if ( + src_node.attrs.get("role") == "spine" + and tgt_node.attrs.get("role") == "leaf" + ): + assert link.attrs.get("tagged") is True + + +# ────────────────────────────────────────────────────────────────────────────── +# link_match Tests +# ────────────────────────────────────────────────────────────────────────────── + + +class TestLinkMatch: + """Tests for link_match filtering in link_rules.""" + + def test_link_match_filters_by_capacity(self, network_with_roles: Network) -> None: + """link_match filters links by their own attributes (capacity).""" + scenario = { + "network": { + "link_rules": [ + { + "source": ".*", + "target": ".*", + "link_match": { + "conditions": [ + {"attr": "capacity", "op": "<", "value": 100} + ] + }, + "risk_groups": ["low_capacity"], + } + ], + } + } + + _process_link_rules(network_with_roles, scenario["network"]) + + # Only links with capacity < 100 should have risk_groups + for link in network_with_roles.links.values(): + if link.capacity < 100: + assert "low_capacity" in link.risk_groups + else: + assert "low_capacity" not in link.risk_groups + + def test_link_match_filters_by_cost(self, network_with_roles: Network) -> None: + """link_match filters links by cost attribute.""" + scenario = { + "network": { + "link_rules": [ + { + "source": ".*", + "target": ".*", + "link_match": { + "conditions": [{"attr": "cost", "op": ">", "value": 1}] + }, + "disabled": True, + } + ], + } + } + + _process_link_rules(network_with_roles, scenario["network"]) + + # Only links with cost > 1 should be disabled + for link in network_with_roles.links.values(): + if link.cost > 1: + assert link.disabled is True + else: + assert link.disabled is False + + def test_link_match_combined_with_endpoint_selectors( + self, network_with_roles: Network + ) -> None: + """link_match works with endpoint selectors.""" + scenario = { + "network": { + "link_rules": [ + { + "source": "spine_1", + "target": "leaf_.*", + "link_match": { + "conditions": [ + {"attr": "capacity", "op": "==", "value": 100} + ] + }, + "attrs": {"high_cap_spine1": True}, + } + ], + } + } + + _process_link_rules(network_with_roles, scenario["network"]) + + # Only spine_1 -> leaf_* links with capacity=100 should have the attr + for link in network_with_roles.links.values(): + if ( + link.source == "spine_1" + and link.target.startswith("leaf_") + and link.capacity == 100 + ): + assert link.attrs.get("high_cap_spine1") is True + else: + assert link.attrs.get("high_cap_spine1") is None + + +# ────────────────────────────────────────────────────────────────────────────── +# node_rules match Tests +# ────────────────────────────────────────────────────────────────────────────── + + +class TestNodeRulesMatch: + """Tests for match support in node_rules.""" + + def test_node_rules_match_filters_by_attribute( + self, network_with_roles: Network + ) -> None: + """node_rules match filters nodes by attribute conditions.""" + scenario = { + "network": { + "node_rules": [ + { + "path": ".*", + "match": { + "conditions": [ + {"attr": "role", "op": "==", "value": "spine"} + ] + }, + "attrs": {"is_spine": True}, + } + ], + } + } + + _process_node_rules(network_with_roles, scenario["network"]) + + # Only spine nodes should have the attr + for node in network_with_roles.nodes.values(): + if node.attrs.get("role") == "spine": + assert node.attrs.get("is_spine") is True + else: + assert node.attrs.get("is_spine") is None + + def test_node_rules_match_with_tier(self, network_with_roles: Network) -> 
None: + """node_rules match works with numeric comparison.""" + scenario = { + "network": { + "node_rules": [ + { + "path": ".*", + "match": { + "conditions": [{"attr": "tier", "op": "==", "value": 1}] + }, + "risk_groups": ["edge_tier"], + } + ], + } + } + + _process_node_rules(network_with_roles, scenario["network"]) + + # Only tier 1 nodes should have risk_groups + for node in network_with_roles.nodes.values(): + if node.attrs.get("tier") == 1: + assert "edge_tier" in node.risk_groups + else: + assert "edge_tier" not in node.risk_groups + + def test_node_rules_path_and_match_combined( + self, network_with_roles: Network + ) -> None: + """node_rules with both path and match applies both filters.""" + scenario = { + "network": { + "node_rules": [ + { + "path": "leaf_.*", + "match": { + "conditions": [{"attr": "tier", "op": "==", "value": 1}] + }, + "attrs": {"leaf_tier1": True}, + } + ], + } + } + + _process_node_rules(network_with_roles, scenario["network"]) + + # Only leaf nodes with tier 1 should have the attr + for node in network_with_roles.nodes.values(): + if node.name.startswith("leaf_") and node.attrs.get("tier") == 1: + assert node.attrs.get("leaf_tier1") is True + else: + assert node.attrs.get("leaf_tier1") is None + + +# ────────────────────────────────────────────────────────────────────────────── +# Integration: Full Network Expansion with Rules +# ────────────────────────────────────────────────────────────────────────────── + + +class TestRulesIntegration: + """Integration tests for rules with full network expansion.""" + + def test_full_scenario_with_rules(self) -> None: + """Full scenario with node_rules and link_rules works end-to-end.""" + scenario = { + "network": { + "nodes": { + "spine_[1-2]": {"count": 1, "attrs": {"role": "spine", "tier": 2}}, + "leaf_[1-3]": {"count": 1, "attrs": {"role": "leaf", "tier": 1}}, + }, + "links": [{"source": "spine_.*", "target": "leaf_.*", "capacity": 100}], + "node_rules": [ + { + "path": ".*", + "match": { + "conditions": [ + {"attr": "role", "op": "==", "value": "spine"} + ] + }, + "attrs": {"critical": True}, + } + ], + "link_rules": [ + { + "source": ".*", + "target": ".*", + "link_match": { + "conditions": [ + {"attr": "capacity", "op": ">=", "value": 100} + ] + }, + "attrs": {"high_capacity": True}, + } + ], + } + } + + net = expand_network_dsl(scenario) + + # Verify spine nodes have critical attr + for node in net.nodes.values(): + if node.attrs.get("role") == "spine": + assert node.attrs.get("critical") is True + + # Verify high capacity links have the attr + for link in net.links.values(): + if link.capacity >= 100: + assert link.attrs.get("high_capacity") is True diff --git a/tests/dsl/test_selectors.py b/tests/dsl/test_selectors.py index e91034e..f002ed0 100644 --- a/tests/dsl/test_selectors.py +++ b/tests/dsl/test_selectors.py @@ -82,7 +82,7 @@ def test_group_by_only_valid(self) -> None: def test_match_only_valid(self) -> None: """NodeSelector with only match is valid.""" - cond = Condition(attr="role", operator="==", value="leaf") + cond = Condition(attr="role", op="==", value="leaf") match = MatchSpec(conditions=[cond]) sel = NodeSelector(match=match) assert sel.match is not None @@ -91,7 +91,7 @@ def test_match_only_valid(self) -> None: def test_all_fields_valid(self) -> None: """NodeSelector with all fields is valid.""" - cond = Condition(attr="role", operator="==", value="leaf") + cond = Condition(attr="role", op="==", value="leaf") match = MatchSpec(conditions=[cond]) sel = NodeSelector(path="^dc1/.*", 
group_by="role", match=match) assert sel.path == "^dc1/.*" @@ -135,7 +135,7 @@ def test_dict_with_match(self) -> None: sel = normalize_selector( { "match": { - "conditions": [{"attr": "role", "operator": "==", "value": "leaf"}], + "conditions": [{"attr": "role", "op": "==", "value": "leaf"}], "logic": "and", } }, @@ -151,9 +151,7 @@ def test_dict_combined_fields(self) -> None: { "path": "^dc1/.*", "group_by": "role", - "match": { - "conditions": [{"attr": "tier", "operator": "==", "value": 1}] - }, + "match": {"conditions": [{"attr": "tier", "op": "==", "value": 1}]}, }, "demand", ) @@ -263,9 +261,7 @@ def test_match_filters_nodes(self, attributed_network: Network) -> None: """Match conditions filter nodes.""" sel = NodeSelector( path=".*", - match=MatchSpec( - conditions=[Condition(attr="role", operator="==", value="leaf")] - ), + match=MatchSpec(conditions=[Condition(attr="role", op="==", value="leaf")]), ) groups = select_nodes(attributed_network, sel, default_active_only=False) @@ -280,8 +276,8 @@ def test_match_with_and_logic(self, attributed_network: Network) -> None: path=".*", match=MatchSpec( conditions=[ - Condition(attr="role", operator="==", value="leaf"), - Condition(attr="dc", operator="==", value="dc1"), + Condition(attr="role", op="==", value="leaf"), + Condition(attr="dc", op="==", value="dc1"), ], logic="and", ), @@ -298,8 +294,8 @@ def test_match_with_or_logic(self, attributed_network: Network) -> None: path=".*", match=MatchSpec( conditions=[ - Condition(attr="role", operator="==", value="leaf"), - Condition(attr="role", operator="==", value="spine"), + Condition(attr="role", op="==", value="leaf"), + Condition(attr="role", op="==", value="spine"), ], logic="or", ), @@ -389,6 +385,35 @@ def test_group_by_missing_attribute_excludes_nodes( node_names = [n.name for n in all_nodes] assert "orphan" not in node_names + def test_group_by_disabled_field(self, attributed_network: Network) -> None: + """group_by can use top-level fields like disabled.""" + # Disable one node for testing + attributed_network.nodes["dc1_leaf_1"].disabled = True + + sel = NodeSelector(path=".*", group_by="disabled") + groups = select_nodes(attributed_network, sel, default_active_only=False) + + assert "True" in groups + assert "False" in groups + # Verify disabled node is in the True group + disabled_names = [n.name for n in groups["True"]] + assert "dc1_leaf_1" in disabled_names + # Verify enabled nodes are in the False group + enabled_names = [n.name for n in groups["False"]] + assert len(enabled_names) > 0 + assert "dc1_leaf_1" not in enabled_names + + def test_group_by_name_field(self, attributed_network: Network) -> None: + """group_by can use top-level name field.""" + sel = NodeSelector(path="dc1_leaf.*", group_by="name") + groups = select_nodes(attributed_network, sel, default_active_only=False) + + # Each node should be in its own group (keyed by name) + assert "dc1_leaf_1" in groups + assert "dc1_leaf_2" in groups + assert len(groups["dc1_leaf_1"]) == 1 + assert len(groups["dc1_leaf_2"]) == 1 + class TestSelectNodesMatchOnly: """Tests for match-only selectors (no path specified).""" @@ -398,7 +423,7 @@ def test_match_only_selects_all_then_filters( ) -> None: """Match-only selector starts with all nodes, then filters.""" sel = NodeSelector( - match=MatchSpec(conditions=[Condition(attr="tier", operator="==", value=2)]) + match=MatchSpec(conditions=[Condition(attr="tier", op="==", value=2)]) ) groups = select_nodes(attributed_network, sel, default_active_only=False) @@ -491,20 +516,20 @@ def 
test_not_in_operator(self) -> None: ) assert evaluate_condition(attrs, Condition("x", "not_in", ["d", "e"])) is False - def test_any_value_operator(self) -> None: - """Test any_value operator (attribute exists and is not None).""" + def test_exists_operator(self) -> None: + """Test exists operator (attribute exists and is not None).""" attrs = {"x": 0, "y": None, "z": ""} - assert evaluate_condition(attrs, Condition("x", "any_value")) is True - assert evaluate_condition(attrs, Condition("y", "any_value")) is False - assert evaluate_condition(attrs, Condition("z", "any_value")) is True - assert evaluate_condition(attrs, Condition("missing", "any_value")) is False + assert evaluate_condition(attrs, Condition("x", "exists")) is True + assert evaluate_condition(attrs, Condition("y", "exists")) is False + assert evaluate_condition(attrs, Condition("z", "exists")) is True + assert evaluate_condition(attrs, Condition("missing", "exists")) is False - def test_no_value_operator(self) -> None: - """Test no_value operator (attribute missing or None).""" + def test_not_exists_operator(self) -> None: + """Test not_exists operator (attribute missing or None).""" attrs = {"x": 0, "y": None} - assert evaluate_condition(attrs, Condition("x", "no_value")) is False - assert evaluate_condition(attrs, Condition("y", "no_value")) is True - assert evaluate_condition(attrs, Condition("missing", "no_value")) is True + assert evaluate_condition(attrs, Condition("x", "not_exists")) is False + assert evaluate_condition(attrs, Condition("y", "not_exists")) is True + assert evaluate_condition(attrs, Condition("missing", "not_exists")) is True def test_missing_attribute_returns_false(self) -> None: """Missing attribute returns False for most operators.""" diff --git a/tests/dsl/test_skill_examples_validation.py b/tests/dsl/test_skill_examples_validation.py new file mode 100644 index 0000000..9f4d6dc --- /dev/null +++ b/tests/dsl/test_skill_examples_validation.py @@ -0,0 +1,1377 @@ +"""Comprehensive validation of all 19 examples from EXAMPLES.md in the Claude skill. + +This test file validates that every example in the Claude skill documentation +parses correctly and produces the expected results. +""" + +import pytest + +from ngraph.scenario import Scenario + + +# ============================================================================= +# Example 1: Simple Data Center +# ============================================================================= +def test_example_1_simple_data_center(): + """Example 1: Simple Data Center - leaf-spine topology with traffic analysis. 
+ + Expected: 6 nodes (4 leaf + 2 spine), 8 links (4x2 mesh) + """ + yaml_content = """ +network: + nodes: + leaf: + count: 4 + template: "leaf{n}" + attrs: + role: leaf + spine: + count: 2 + template: "spine{n}" + attrs: + role: spine + links: + - source: /leaf + target: /spine + pattern: mesh + capacity: 100 + cost: 1 + +demands: + default: + - source: "^leaf/.*" + target: "^leaf/.*" + volume: 50 + mode: pairwise + +failures: + single_link: + modes: + - weight: 1.0 + rules: + - scope: link + mode: choice + count: 1 + +workflow: + - type: NetworkStats + name: stats +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate node count: 4 leaf + 2 spine = 6 + assert len(scenario.network.nodes) == 6, ( + f"Expected 6 nodes, got {len(scenario.network.nodes)}" + ) + + # Validate link count: 4 leaf * 2 spine = 8 mesh links + assert len(scenario.network.links) == 8, ( + f"Expected 8 links, got {len(scenario.network.links)}" + ) + + # Validate node names + expected_nodes = [ + "leaf/leaf1", + "leaf/leaf2", + "leaf/leaf3", + "leaf/leaf4", + "spine/spine1", + "spine/spine2", + ] + for name in expected_nodes: + assert name in scenario.network.nodes, f"Missing node: {name}" + + # Validate demands + demands = scenario.demand_set.get_default_set() + assert len(demands) == 1 + + # Validate failure policy + policy = scenario.failure_policy_set.get_policy("single_link") + assert policy is not None + assert len(policy.modes) == 1 + + +# ============================================================================= +# Example 2: Multi-Pod with Blueprint +# ============================================================================= +def test_example_2_multi_pod_blueprint(): + """Example 2: Multi-Pod with Blueprint - two pods sharing a blueprint. + + Expected: 12 nodes (2 pods x 6 nodes), 20 links (16 internal + 4 inter-pod) + """ + yaml_content = """ +blueprints: + clos_pod: + nodes: + leaf: + count: 4 + template: "leaf{n}" + attrs: + role: leaf + spine: + count: 2 + template: "spine{n}" + attrs: + role: spine + links: + - source: /leaf + target: /spine + pattern: mesh + capacity: 100 + +network: + nodes: + pod[1-2]: + blueprint: clos_pod + + links: + - source: + path: "pod1/spine" + match: + conditions: + - attr: role + op: "==" + value: spine + target: + path: "pod2/spine" + pattern: mesh + capacity: 400 +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate node count: 2 pods * (4 leaf + 2 spine) = 12 + assert len(scenario.network.nodes) == 12, ( + f"Expected 12 nodes, got {len(scenario.network.nodes)}" + ) + + # Validate link count: 2 pods * 8 internal + 4 inter-pod = 20 + assert len(scenario.network.links) == 20, ( + f"Expected 20 links, got {len(scenario.network.links)}" + ) + + +# ============================================================================= +# Example 3: Backbone with Risk Groups +# ============================================================================= +def test_example_3_backbone_risk_groups(): + """Example 3: Backbone with Risk Groups - WAN with shared-risk link groups. 
+ + Expected: 3 nodes, 3 links, 2 risk groups + """ + yaml_content = """ +network: + nodes: + NewYork: {attrs: {site_type: core}} + Chicago: {attrs: {site_type: core}} + LosAngeles: {attrs: {site_type: core}} + + links: + # Parallel diverse paths + - source: NewYork + target: Chicago + capacity: 100 + cost: 10 + risk_groups: [RG_NY_CHI] + - source: NewYork + target: Chicago + capacity: 100 + cost: 10 + # Single path + - source: Chicago + target: LosAngeles + capacity: 100 + cost: 15 + risk_groups: [RG_CHI_LA] + +risk_groups: + - name: RG_NY_CHI + attrs: + corridor: NYC-Chicago + distance_km: 1200 + - name: RG_CHI_LA + attrs: + corridor: Chicago-LA + distance_km: 2800 + +failures: + srlg_failure: + modes: + - weight: 1.0 + rules: + - scope: risk_group + mode: choice + count: 1 +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate node count + assert len(scenario.network.nodes) == 3, ( + f"Expected 3 nodes, got {len(scenario.network.nodes)}" + ) + + # Validate link count + assert len(scenario.network.links) == 3, ( + f"Expected 3 links, got {len(scenario.network.links)}" + ) + + # Validate risk groups + assert len(scenario.network.risk_groups) == 2, ( + f"Expected 2 risk groups, got {len(scenario.network.risk_groups)}" + ) + assert "RG_NY_CHI" in scenario.network.risk_groups + assert "RG_CHI_LA" in scenario.network.risk_groups + + +# ============================================================================= +# Example 4: Variable Expansion at Scale +# ============================================================================= +def test_example_4_variable_expansion(): + """Example 4: Variable Expansion at Scale - large fabric. + + Expected: 1540 nodes (4x8x48 compute + 4 spine), 6144 links + """ + yaml_content = """ +network: + nodes: + plane[1-4]/rack[1-8]: + count: 48 + template: "server{n}" + attrs: + role: compute + + fabric/spine[1-4]: + count: 1 + template: "spine" + attrs: + role: spine + + links: + - source: "plane${p}/rack${r}" + target: "fabric/spine${s}" + expand: + vars: + p: [1, 2, 3, 4] + r: [1, 2, 3, 4, 5, 6, 7, 8] + s: [1, 2, 3, 4] + mode: cartesian + pattern: mesh + capacity: 100 +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate node count: 4 planes * 8 racks * 48 servers + 4 spine = 1536 + 4 = 1540 + assert len(scenario.network.nodes) == 1540, ( + f"Expected 1540 nodes, got {len(scenario.network.nodes)}" + ) + + # Validate link count: + # The expand block yields 4 planes * 8 racks * 4 spines = 128 (p, r, s) bindings + # Each binding meshes the 48 servers of plane{p}/rack{r} with a single spine node + # 128 bindings * 48 links each = 6144 links + assert len(scenario.network.links) == 6144, ( + f"Expected 6144 links, got {len(scenario.network.links)}" + )
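+ + +# NOTE (editorial addition): template expansion is bounded; the expansion +# tests in this change (tests/dsl/test_expansion.py) expect a spec producing +# more than 10,000 combinations to raise. Example 4's 128 (p, r, s) bindings +# sit far below that cap, so the scale here comes from the mesh pattern, not +# the expansion itself. Inferred from the tests in this change, not from the +# implementation. + + +# ============================================================================= +# Example 5: Full Mesh Topology +# ============================================================================= +def test_example_5_full_mesh(): + """Example 5: Full Mesh Topology - 4-node full mesh for testing. 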
+ + Expected: 4 nodes, 6 links (full mesh) + """ + yaml_content = """ +seed: 42 + +network: + nodes: + N1: {} + N2: {} + N3: {} + N4: {} + + links: + - source: N1 + target: N2 + capacity: 2.0 + cost: 1.0 + - source: N1 + target: N3 + capacity: 1.0 + cost: 1.0 + - source: N1 + target: N4 + capacity: 2.0 + cost: 1.0 + - source: N2 + target: N3 + capacity: 2.0 + cost: 1.0 + - source: N2 + target: N4 + capacity: 1.0 + cost: 1.0 + - source: N3 + target: N4 + capacity: 2.0 + cost: 1.0 + +failures: + single_link_failure: + modes: + - weight: 1.0 + rules: + - scope: link + mode: choice + count: 1 + +demands: + baseline: + - source: "^N([1-4])$" + target: "^N([1-4])$" + volume: 12.0 + mode: pairwise + +workflow: + - type: NetworkStats + name: stats +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate node count + assert len(scenario.network.nodes) == 4, ( + f"Expected 4 nodes, got {len(scenario.network.nodes)}" + ) + + # Validate link count (full mesh of 4 nodes = 6 links) + assert len(scenario.network.links) == 6, ( + f"Expected 6 links, got {len(scenario.network.links)}" + ) + + # Validate seed + assert scenario.seed == 42 + + +# ============================================================================= +# Example 6: Attribute-Based Selectors +# ============================================================================= +def test_example_6_attribute_selectors(): + """Example 6: Attribute-Based Selectors - using match conditions. + + Expected: 8 nodes, 8 links (only rack-1 servers connect to switches) + """ + yaml_content = """ +network: + nodes: + servers: + count: 4 + template: "srv{n}" + attrs: + role: compute + rack: "rack-1" + servers_b: + count: 2 + template: "srvb{n}" + attrs: + role: compute + rack: "rack-9" + switches: + count: 2 + template: "sw{n}" + attrs: + tier: spine + + links: + - source: + path: "/servers" + match: + logic: and + conditions: + - attr: role + op: "==" + value: compute + - attr: rack + op: "!=" + value: "rack-9" + target: + path: "/switches" + match: + conditions: + - attr: tier + op: "==" + value: spine + pattern: mesh + capacity: 10 + cost: 1 +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate node count: 4 servers + 2 servers_b + 2 switches = 8 + assert len(scenario.network.nodes) == 8, ( + f"Expected 8 nodes, got {len(scenario.network.nodes)}" + ) + + # Validate link count: only rack-1 servers (4) connect to switches (2) = 8 links + assert len(scenario.network.links) == 8, ( + f"Expected 8 links, got {len(scenario.network.links)}" + ) + + +# ============================================================================= +# Example 7: Blueprint with Parameter Overrides +# ============================================================================= +def test_example_7_blueprint_params(): + """Example 7: Blueprint with Parameter Overrides. 
+ + Expected: Node Main/leaf/leaf1 has attrs.some_field.nested_key = 999 + """ + yaml_content = """ +blueprints: + bp1: + nodes: + leaf: + count: 1 + attrs: + some_field: + nested_key: 111 + +network: + nodes: + Main: + blueprint: bp1 + params: + leaf.attrs.some_field.nested_key: 999 +""" + scenario = Scenario.from_yaml(yaml_content) + + # Find the leaf node + leaf_nodes = [n for n in scenario.network.nodes if "leaf" in n] + assert len(leaf_nodes) == 1, f"Expected 1 leaf node, got {len(leaf_nodes)}" + + leaf_node = scenario.network.nodes[leaf_nodes[0]] + assert leaf_node.attrs["some_field"]["nested_key"] == 999, ( + f"Expected nested_key=999, got {leaf_node.attrs['some_field']['nested_key']}" + ) + + +# ============================================================================= +# Example 8: Node and Link Rules +# ============================================================================= +def test_example_8_node_link_rules(): + """Example 8: Node and Link Rules - modifying topology after creation. + + Expected: Switches 1 and 3 disabled, specific link upgraded to 200 capacity + """ + yaml_content = """ +blueprints: + test_bp: + nodes: + switches: + count: 3 + template: "switch{n}" + +network: + nodes: + group1: + count: 2 + template: "node{n}" + group2: + count: 2 + template: "node{n}" + my_clos1: + blueprint: test_bp + + links: + - source: /group1 + target: /group2 + pattern: mesh + capacity: 100 + cost: 10 + + node_rules: + - path: "^my_clos1/switches/switch(1|3)$" + disabled: true + attrs: + maintenance_mode: active + hw_type: newer_model + + link_rules: + - source: "^group1/node1$" + target: "^group2/node1$" + capacity: 200 + cost: 5 +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate switches 1 and 3 are disabled + switch1 = scenario.network.nodes.get("my_clos1/switches/switch1") + switch3 = scenario.network.nodes.get("my_clos1/switches/switch3") + switch2 = scenario.network.nodes.get("my_clos1/switches/switch2") + + assert switch1 is not None and switch1.disabled, "switch1 should be disabled" + assert switch3 is not None and switch3.disabled, "switch3 should be disabled" + assert switch2 is not None and not switch2.disabled, ( + "switch2 should not be disabled" + ) + + # Validate link rule applied + upgraded_link = None + for link in scenario.network.links.values(): + if link.source == "group1/node1" and link.target == "group2/node1": + upgraded_link = link + break + + assert upgraded_link is not None, "Upgraded link not found" + assert upgraded_link.capacity == 200, ( + f"Expected capacity 200, got {upgraded_link.capacity}" + ) + assert upgraded_link.cost == 5, f"Expected cost 5, got {upgraded_link.cost}" + + +# ============================================================================= +# Example 9: Complete Traffic Analysis +# ============================================================================= +def test_example_9_traffic_analysis(): + """Example 9: Complete Traffic Analysis - full workflow with MSD and placement. 
+ + Expected: 40 nodes total (2 pops x (4 spine + 16 leaf)), workflow steps defined + """ + yaml_content = """ +seed: 42 + +blueprints: + Clos_L16_S4: + nodes: + spine: + count: 4 + template: spine{n} + attrs: + role: spine + leaf: + count: 16 + template: leaf{n} + attrs: + role: leaf + links: + - source: /leaf + target: /spine + pattern: mesh + capacity: 3200 + cost: 1 + +network: + nodes: + metro1/pop[1-2]: + blueprint: Clos_L16_S4 + attrs: + metro_name: new-york + node_type: pop + +demands: + baseline: + - source: "^metro1/pop1/.*" + target: "^metro1/pop2/.*" + volume: 15000.0 + mode: pairwise + flow_policy: TE_WCMP_UNLIM + +failures: + single_link: + modes: + - weight: 1.0 + rules: + - scope: link + mode: choice + count: 1 + +workflow: + - type: NetworkStats + name: network_statistics +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate node count: 2 pops * (4 spine + 16 leaf) = 40 + assert len(scenario.network.nodes) == 40, ( + f"Expected 40 nodes, got {len(scenario.network.nodes)}" + ) + + # Validate workflow steps + assert len(scenario.workflow) == 1 + + # Run workflow + scenario.run() + + # Check stats were computed + results = scenario.results.to_dict() + assert "network_statistics" in results["steps"] + assert results["steps"]["network_statistics"]["data"]["node_count"] == 40 + + +# ============================================================================= +# Example 10: Group-By Selectors +# ============================================================================= +def test_example_10_group_by(): + """Example 10: Group-By Selectors - grouping nodes by attribute. + + Expected: 4 nodes, 2 links, traffic flows grouped by datacenter + """ + yaml_content = """ +network: + nodes: + dc1_srv1: {attrs: {dc: dc1, role: server}} + dc1_srv2: {attrs: {dc: dc1, role: server}} + dc2_srv1: {attrs: {dc: dc2, role: server}} + dc2_srv2: {attrs: {dc: dc2, role: server}} + links: + - source: dc1_srv1 + target: dc2_srv1 + capacity: 100 + - source: dc1_srv2 + target: dc2_srv2 + capacity: 100 + +demands: + inter_dc: + - source: + group_by: dc + target: + group_by: dc + volume: 100 + mode: pairwise +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate node count + assert len(scenario.network.nodes) == 4, ( + f"Expected 4 nodes, got {len(scenario.network.nodes)}" + ) + + # Validate link count + assert len(scenario.network.links) == 2, ( + f"Expected 2 links, got {len(scenario.network.links)}" + ) + + # Validate demands + demands = scenario.demand_set.get_set("inter_dc") + assert len(demands) == 1
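+ + +# NOTE (editorial addition): the assertion above counts demand spec entries, +# not realized flows. With group_by selectors, a single pairwise entry is +# expected to expand into one flow per ordered pair of distinct groups +# (dc1->dc2 and dc2->dc1 here) when the scenario runs; Example 15 shows the +# related group_mode knob. Inferred from usage across these tests, not +# verified against the demand-expansion implementation. + + +# ============================================================================= +# Example 11: Advanced Failure Policies +# ============================================================================= +def test_example_11_advanced_failures(): + """Example 11: Advanced Failure Policies - weighted modes with conditions. 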
+ + Expected: 5 nodes, 4 links, 3 risk groups, failure policy with 4 modes + """ + yaml_content = """ +network: + nodes: + core1: {attrs: {role: core, capacity_gbps: 1000}} + core2: {attrs: {role: core, capacity_gbps: 1000}} + edge1: {attrs: {role: edge, capacity_gbps: 400, region: west}} + edge2: {attrs: {role: edge, capacity_gbps: 400, region: east}} + edge3: {attrs: {role: edge, capacity_gbps: 200, region: west}} + links: + - source: core1 + target: core2 + capacity: 1000 + risk_groups: [RG_core] + - source: core1 + target: edge1 + capacity: 400 + risk_groups: [RG_west] + - source: core1 + target: edge3 + capacity: 200 + risk_groups: [RG_west] + - source: core2 + target: edge2 + capacity: 400 + risk_groups: [RG_east] + +risk_groups: + - name: RG_core + attrs: {tier: core, distance_km: 50} + - name: RG_west + attrs: {tier: edge, distance_km: 500} + - name: RG_east + attrs: {tier: edge, distance_km: 800} + +failures: + mixed_failures: + expand_groups: true + expand_children: false + modes: + # 40% chance: fail 1 edge node weighted by capacity + - weight: 0.4 + attrs: {scenario: edge_failure} + rules: + - scope: node + mode: choice + count: 1 + match: + conditions: + - attr: role + op: "==" + value: edge + weight_by: capacity_gbps + + # 35% chance: fail 1 risk group weighted by distance + - weight: 0.35 + attrs: {scenario: srlg_failure} + rules: + - scope: risk_group + mode: choice + count: 1 + weight_by: distance_km + + # 15% chance: fail all west-region nodes + - weight: 0.15 + attrs: {scenario: regional_outage} + rules: + - scope: node + mode: all + match: + conditions: + - attr: region + op: "==" + value: west + + # 10% chance: random link failures (5% each) + - weight: 0.1 + attrs: {scenario: random_link} + rules: + - scope: link + mode: random + probability: 0.05 +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate node count + assert len(scenario.network.nodes) == 5, ( + f"Expected 5 nodes, got {len(scenario.network.nodes)}" + ) + + # Validate link count + assert len(scenario.network.links) == 4, ( + f"Expected 4 links, got {len(scenario.network.links)}" + ) + + # Validate risk groups + assert len(scenario.network.risk_groups) == 3, ( + f"Expected 3 risk groups, got {len(scenario.network.risk_groups)}" + ) + + # Validate failure policy modes + policy = scenario.failure_policy_set.get_policy("mixed_failures") + assert len(policy.modes) == 4, f"Expected 4 modes, got {len(policy.modes)}" + assert policy.expand_groups is True + assert policy.expand_children is False + + +# ============================================================================= +# Example 12: Hardware Components and Cost Analysis +# ============================================================================= +def test_example_12_hardware_components(): + """Example 12: Hardware Components and Cost Analysis. 
+ + Expected: 6 nodes, 16 links (4x2x2), components library populated + """ + yaml_content = """ +components: + SpineRouter: + component_type: chassis + description: "64-port spine switch" + capex: 55000.0 + power_watts: 2000.0 + power_watts_max: 3000.0 + capacity: 102400.0 + ports: 64 + + LeafRouter: + component_type: chassis + description: "48-port leaf switch" + capex: 25000.0 + power_watts: 800.0 + power_watts_max: 1200.0 + capacity: 38400.0 + ports: 48 + + Optic400G: + component_type: optic + description: "400G DR4 pluggable" + capex: 3000.0 + power_watts: 16.0 + capacity: 400.0 + +network: + name: "datacenter-fabric" + version: "2.0" + + nodes: + spine: + count: 2 + template: "spine{n}" + attrs: + hardware: + component: SpineRouter + count: 1 + leaf: + count: 4 + template: "leaf{n}" + attrs: + hardware: + component: LeafRouter + count: 1 + + links: + - source: /leaf + target: /spine + pattern: mesh + count: 2 + capacity: 800 + cost: 1 + attrs: + hardware: + source: + component: Optic400G + count: 2 + target: + component: Optic400G + count: 2 + exclusive: true + +workflow: + - type: NetworkStats + name: stats +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate node count: 2 spine + 4 leaf = 6 + assert len(scenario.network.nodes) == 6, ( + f"Expected 6 nodes, got {len(scenario.network.nodes)}" + ) + + # Validate link count: 4 leaf * 2 spine * 2 parallel = 16 + assert len(scenario.network.links) == 16, ( + f"Expected 16 links, got {len(scenario.network.links)}" + ) + + # Validate components library + assert len(scenario.components_library.components) == 3 + assert scenario.components_library.get("SpineRouter") is not None + assert scenario.components_library.get("LeafRouter") is not None + assert scenario.components_library.get("Optic400G") is not None + + +# ============================================================================= +# Example 13: YAML Anchors for Reuse +# ============================================================================= +def test_example_13_yaml_anchors(): + """Example 13: YAML Anchors for Reuse. + + Expected: Anchors resolved during YAML parsing, 6 nodes, 8 links + """ + yaml_content = """ +vars: + default_link: &link_cfg + capacity: 100 + cost: 1 + spine_attrs: &spine_attrs + role: spine + tier: 2 + leaf_attrs: &leaf_attrs + role: leaf + tier: 1 + +network: + nodes: + spine: + count: 2 + template: "spine{n}" + attrs: + <<: *spine_attrs + region: east + + leaf: + count: 4 + template: "leaf{n}" + attrs: + <<: *leaf_attrs + region: east + + links: + - source: /leaf + target: /spine + pattern: mesh + <<: *link_cfg + attrs: + link_type: fabric +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate node count: 2 spine + 4 leaf = 6 + assert len(scenario.network.nodes) == 6, ( + f"Expected 6 nodes, got {len(scenario.network.nodes)}" + ) + + # Validate link count: 4 leaf * 2 spine = 8 + assert len(scenario.network.links) == 8, ( + f"Expected 8 links, got {len(scenario.network.links)}" + ) + + # Validate anchor values were resolved + spine_node = scenario.network.nodes.get("spine/spine1") + assert spine_node.attrs["role"] == "spine" + assert spine_node.attrs["tier"] == 2 + + +# ============================================================================= +# Example 14: One-to-One Pattern and Zip Expansion +# ============================================================================= +def test_example_14_one_to_one_zip(): + """Example 14: One-to-One Pattern and Zip Expansion. 
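+    With 4 servers and 2 switches, one_to_one wraps modulo the smaller side
+    (server3->switch1, server4->switch2), as the inline comment shows.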
+
+    Expected: Demonstrates one_to_one modulo wrap and zip expansion
+    """
+    yaml_content = """
+network:
+  nodes:
+    # 4 servers, 2 switches - compatible for one_to_one (4 is multiple of 2)
+    server[1-4]:
+      count: 1
+      template: "srv"
+    switch[1-2]:
+      count: 1
+      template: "sw"
+
+  links:
+    # one_to_one: server1->switch1, server2->switch2, server3->switch1, server4->switch2
+    - source: /server
+      target: /switch
+      pattern: one_to_one
+      capacity: 100
+
+    # zip expansion: pairs variables by index (equal-length lists required)
+    - source: "server${idx}"
+      target: "switch${sw}"
+      expand:
+        vars:
+          idx: [1, 2]
+          sw: [1, 2]
+        mode: zip
+      pattern: one_to_one
+      capacity: 50
+      cost: 2
+"""
+    scenario = Scenario.from_yaml(yaml_content)
+
+    # Validate node count: 4 servers + 2 switches = 6
+    assert len(scenario.network.nodes) == 6, (
+        f"Expected 6 nodes, got {len(scenario.network.nodes)}"
+    )
+
+    # Validate links were created
+    assert len(scenario.network.links) > 0, "Expected links to be created"
+
+
+# =============================================================================
+# Example 15: Traffic Demands with Variable Expansion and Group Modes
+# =============================================================================
+def test_example_15_demand_variables():
+    """Example 15: Traffic Demands with Variable Expansion and Group Modes.
+
+    Expected: Variable expansion in demands, group_mode, priority, demand_placed
+    """
+    yaml_content = """
+network:
+  nodes:
+    dc1_leaf1: {attrs: {dc: dc1, role: leaf}}
+    dc1_leaf2: {attrs: {dc: dc1, role: leaf}}
+    dc2_leaf1: {attrs: {dc: dc2, role: leaf}}
+    dc2_leaf2: {attrs: {dc: dc2, role: leaf}}
+    dc3_leaf1: {attrs: {dc: dc3, role: leaf}}
+  links:
+    - {source: dc1_leaf1, target: dc2_leaf1, capacity: 100}
+    - {source: dc1_leaf2, target: dc2_leaf2, capacity: 100}
+    - {source: dc2_leaf1, target: dc3_leaf1, capacity: 100}
+
+demands:
+  # Variable expansion in demands (node names are flat, so match on the
+  # underscore-delimited prefix)
+  inter_dc:
+    - source: "^${src}_.*"
+      target: "^${dst}_.*"
+      volume: 50
+      expand:
+        vars:
+          src: [dc1, dc2]
+          dst: [dc2, dc3]
+        mode: zip
+
+  # Group modes with group_by
+  grouped:
+    - source:
+        group_by: dc
+      target:
+        group_by: dc
+      volume: 100
+      mode: pairwise
+      group_mode: per_group
+      priority: 1
+      demand_placed: 10.0
+      flow_policy: SHORTEST_PATHS_WCMP
+"""
+    scenario = Scenario.from_yaml(yaml_content)
+
+    # Validate node count
+    assert len(scenario.network.nodes) == 5, (
+        f"Expected 5 nodes, got {len(scenario.network.nodes)}"
+    )
+
+    # Validate inter_dc demands were expanded
+    inter_dc_demands = scenario.demand_set.get_set("inter_dc")
+    assert len(inter_dc_demands) == 2, (
+        f"Expected 2 inter_dc demands, got {len(inter_dc_demands)}"
+    )
+
+    # Validate grouped demands
+    grouped_demands = scenario.demand_set.get_set("grouped")
+    assert len(grouped_demands) == 1
+
+
+# =============================================================================
+# Example 16: Hierarchical Risk Groups
+# =============================================================================
+def test_example_16_hierarchical_risk_groups():
+    """Example 16: Hierarchical Risk Groups - nested risk group structure.
+
+    Expected: Hierarchical risk groups with children, recursive failure expansion.
+
+    Note: Nodes must reference risk groups defined at the top level. Child groups
+    are used for hierarchical failure expansion (expand_children: true) but nodes
+    reference the leaf-level groups which must be defined at the top level.
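+    With expand_children: true, selecting the parent (Rack1) also fails its
+    children (Rack1_Card1, Rack1_Card2).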
+ """ + yaml_content = """ +network: + nodes: + rack1_srv1: {risk_groups: [Rack1_Card1]} + rack1_srv2: {risk_groups: [Rack1_Card1]} + rack1_srv3: {risk_groups: [Rack1_Card2]} + rack2_srv1: {risk_groups: [Rack2]} + links: + - {source: rack1_srv1, target: rack2_srv1, capacity: 100} + - {source: rack1_srv2, target: rack2_srv1, capacity: 100} + - {source: rack1_srv3, target: rack2_srv1, capacity: 100} + +risk_groups: + # All risk groups that nodes reference must be defined at top level + - name: Rack1_Card1 + attrs: {slot: 1, parent: Rack1} + - name: Rack1_Card2 + attrs: {slot: 2, parent: Rack1} + - name: Rack2 + disabled: false + attrs: {location: "DC1-Row2"} + # Parent risk group with children for hierarchical failure expansion + - name: Rack1 + attrs: {location: "DC1-Row1"} + children: + - name: Rack1_Card1 + - name: Rack1_Card2 + +failures: + hierarchical: + expand_groups: true + expand_children: true + modes: + - weight: 1.0 + rules: + - scope: risk_group + mode: choice + count: 1 + match: + conditions: + - attr: location + op: contains + value: "DC1" +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate node count + assert len(scenario.network.nodes) == 4, ( + f"Expected 4 nodes, got {len(scenario.network.nodes)}" + ) + + # Validate all risk groups exist at top level + assert "Rack1" in scenario.network.risk_groups + assert "Rack2" in scenario.network.risk_groups + assert "Rack1_Card1" in scenario.network.risk_groups + assert "Rack1_Card2" in scenario.network.risk_groups + + # Validate Rack1 has children (for hierarchical expansion) + rack1 = scenario.network.risk_groups["Rack1"] + assert len(rack1.children) == 2, f"Expected 2 children, got {len(rack1.children)}" + + # Validate failure policy + policy = scenario.failure_policy_set.get_policy("hierarchical") + assert policy.expand_groups is True + assert policy.expand_children is True + + +# ============================================================================= +# Example 17: Risk Group Membership Rules +# ============================================================================= +def test_example_17_membership_rules(): + """Example 17: Risk Group Membership Rules - dynamic assignment by attributes. 
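+    Membership rules match nodes or links by attribute conditions at build
+    time, so matching entities need not list risk_groups explicitly.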
+ + Expected: Nodes and links automatically assigned to risk groups + """ + yaml_content = """ +network: + nodes: + core1: {attrs: {role: core, tier: 3, datacenter: dc1}} + core2: {attrs: {role: core, tier: 3, datacenter: dc2}} + edge1: {attrs: {role: edge, tier: 1, datacenter: dc1}} + edge2: {attrs: {role: edge, tier: 1, datacenter: dc2}} + links: + - source: core1 + target: core2 + capacity: 1000 + attrs: + route_type: backbone + path_id: primary + - source: core1 + target: edge1 + capacity: 400 + +risk_groups: + # Assign all core tier-3 nodes + - name: CoreTier3 + membership: + scope: node + match: + logic: and + conditions: + - attr: role + op: "==" + value: core + - attr: tier + op: "==" + value: 3 + + # Assign links by route type + - name: BackboneLinks + membership: + scope: link + match: + logic: and + conditions: + - attr: route_type + op: "==" + value: backbone + + # String shorthand for simple groups + - "ManualGroup1" +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate node count + assert len(scenario.network.nodes) == 4, ( + f"Expected 4 nodes, got {len(scenario.network.nodes)}" + ) + + # Validate risk groups + assert "CoreTier3" in scenario.network.risk_groups + assert "BackboneLinks" in scenario.network.risk_groups + assert "ManualGroup1" in scenario.network.risk_groups + + # Validate membership rule applied - core nodes should have CoreTier3 + core1 = scenario.network.nodes["core1"] + core2 = scenario.network.nodes["core2"] + assert "CoreTier3" in core1.risk_groups, ( + f"core1 should have CoreTier3, has {core1.risk_groups}" + ) + assert "CoreTier3" in core2.risk_groups, ( + f"core2 should have CoreTier3, has {core2.risk_groups}" + ) + + # Validate link membership + backbone_link = None + for link in scenario.network.links.values(): + if link.source == "core1" and link.target == "core2": + backbone_link = link + break + + assert backbone_link is not None + assert "BackboneLinks" in backbone_link.risk_groups, ( + f"Backbone link should have BackboneLinks, has {backbone_link.risk_groups}" + ) + + +# ============================================================================= +# Example 18: Generated Risk Groups +# ============================================================================= +def test_example_18_generated_risk_groups(): + """Example 18: Generated Risk Groups - create from unique attribute values. 
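+    Each unique value of the group_by attribute yields one risk group, named
+    via the ${value} template (e.g., DC_dc1, DC_dc2).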
+ + Expected: 6 risk groups created from datacenter, rack, and connection_type + """ + yaml_content = """ +network: + nodes: + srv1: {attrs: {datacenter: dc1, rack: r1}} + srv2: {attrs: {datacenter: dc1, rack: r2}} + srv3: {attrs: {datacenter: dc2, rack: r1}} + links: + - source: srv1 + target: srv2 + capacity: 100 + attrs: + connection_type: intra_dc + - source: srv2 + target: srv3 + capacity: 100 + attrs: + connection_type: inter_dc + +risk_groups: + # Generate risk group per datacenter (from nodes) + - generate: + scope: node + group_by: datacenter + name: "DC_${value}" + attrs: + generated: true + type: location + + # Generate risk group per rack (from nodes) + - generate: + scope: node + group_by: rack + name: "Rack_${value}" + + # Generate risk group per connection type (from links) + - generate: + scope: link + group_by: connection_type + name: "Links_${value}" +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate node count + assert len(scenario.network.nodes) == 3, ( + f"Expected 3 nodes, got {len(scenario.network.nodes)}" + ) + + # Validate generated risk groups exist + # DC_dc1, DC_dc2 (2 groups from datacenter) + # Rack_r1, Rack_r2 (2 groups from rack) + # Links_intra_dc, Links_inter_dc (2 groups from connection_type) + expected_rgs = [ + "DC_dc1", + "DC_dc2", + "Rack_r1", + "Rack_r2", + "Links_intra_dc", + "Links_inter_dc", + ] + for rg_name in expected_rgs: + assert rg_name in scenario.network.risk_groups, ( + f"Missing generated risk group: {rg_name}" + ) + + assert len(scenario.network.risk_groups) == 6, ( + f"Expected 6 risk groups, got {len(scenario.network.risk_groups)}" + ) + + +# ============================================================================= +# Example 19: Additional Selector Operators +# ============================================================================= +def test_example_19_selector_operators(): + """Example 19: Additional Selector Operators - all condition operators. 
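+    srv4 deliberately omits the region attribute so that the exists and
+    not_exists operators select distinct node sets.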
+ + Expected: Demonstrates >=, <, in, contains, exists, not_exists operators + """ + yaml_content = """ +network: + nodes: + srv1: {attrs: {tier: 1, tags: [prod, web], region: null}} + srv2: {attrs: {tier: 2, tags: [prod, db], region: east}} + srv3: {attrs: {tier: 3, tags: [dev], region: west}} + srv4: {attrs: {tier: 2}} + links: + - {source: srv1, target: srv2, capacity: 100} + - {source: srv2, target: srv3, capacity: 100} + - {source: srv3, target: srv4, capacity: 100} + +demands: + filtered: + # Tier comparison operators + - source: + match: + conditions: + - attr: tier + op: ">=" + value: 2 + target: + match: + conditions: + - attr: tier + op: "<" + value: 3 + volume: 50 + mode: pairwise + + # List membership operators + - source: + match: + conditions: + - attr: region + op: in + value: [east, west] + target: + match: + conditions: + - attr: tags + op: contains + value: prod + volume: 25 + mode: combine + + # Existence operators + - source: + match: + conditions: + - attr: region + op: exists + target: + match: + conditions: + - attr: region + op: not_exists + volume: 10 + mode: pairwise +""" + scenario = Scenario.from_yaml(yaml_content) + + # Validate node count + assert len(scenario.network.nodes) == 4, ( + f"Expected 4 nodes, got {len(scenario.network.nodes)}" + ) + + # Validate link count + assert len(scenario.network.links) == 3, ( + f"Expected 3 links, got {len(scenario.network.links)}" + ) + + # Validate demands were parsed + demands = scenario.demand_set.get_set("filtered") + assert len(demands) == 3, f"Expected 3 demands, got {len(demands)}" + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/integration/README.md b/tests/integration/README.md index f4eb10f..8467c17 100644 --- a/tests/integration/README.md +++ b/tests/integration/README.md @@ -403,20 +403,6 @@ def test_missing_blueprint(): scenario.run() ``` -### Migration Guide - -#### **Existing Tests** - -- Keep existing YAML-based tests as integration references -- Add template-based variants for parameterized testing -- Migrate error cases to use template builders - -#### **New Tests** - -- Start with appropriate template builder -- Use `ScenarioTemplateBuilder` for high-level composition -- Use specialized templates for specific test categories - ### Template Development #### **Adding New Templates** diff --git a/tests/integration/expectations.py b/tests/integration/expectations.py index d609407..9f481b1 100644 --- a/tests/integration/expectations.py +++ b/tests/integration/expectations.py @@ -59,7 +59,7 @@ def _calculate_scenario_3_total_nodes() -> int: # Scenario 1: Basic 6-node L3 US backbone network # Simple topology with explicitly defined nodes and links SCENARIO_1_EXPECTATIONS = NetworkExpectations( - node_count=6, + count=6, edge_count=SCENARIO_1_PHYSICAL_LINKS * DEFAULT_BIDIRECTIONAL_MULTIPLIER, specific_nodes={"SEA", "SFO", "DEN", "DFW", "JFK", "DCA"}, specific_links=[ @@ -79,7 +79,7 @@ def _calculate_scenario_3_total_nodes() -> int: # Scenario 2: Hierarchical DSL with blueprints and multi-node expansions # Topology using nested blueprints with parameter overrides SCENARIO_2_EXPECTATIONS = NetworkExpectations( - node_count=sum(SCENARIO_2_NODE_BREAKDOWN.values()), + count=sum(SCENARIO_2_NODE_BREAKDOWN.values()), edge_count=SCENARIO_2_PHYSICAL_LINKS * DEFAULT_BIDIRECTIONAL_MULTIPLIER, specific_nodes={"DEN", "DFW", "JFK", "DCA"}, # Standalone nodes blueprint_expansions={ @@ -96,7 +96,7 @@ def _calculate_scenario_3_total_nodes() -> int: # Scenario 3: 3-tier Clos network with nested 
blueprints # Topology with deep blueprint nesting and capacity probing SCENARIO_3_EXPECTATIONS = NetworkExpectations( - node_count=_calculate_scenario_3_total_nodes(), + count=_calculate_scenario_3_total_nodes(), edge_count=SCENARIO_3_PHYSICAL_LINKS * DEFAULT_BIDIRECTIONAL_MULTIPLIER, specific_nodes=set(), # All nodes generated from blueprints blueprint_expansions={ @@ -186,7 +186,7 @@ def _calculate_scenario_4_total_links() -> int: # Main expectation structure for scenario 4 SCENARIO_4_EXPECTATIONS = NetworkExpectations( - node_count=_calculate_scenario_4_total_nodes(), # Total nodes after disabled rack + count=_calculate_scenario_4_total_nodes(), # Total nodes after disabled rack edge_count=_calculate_scenario_4_total_links(), # Actual observed link count specific_nodes=set(), # All nodes generated from blueprints and expansion blueprint_expansions={ diff --git a/tests/integration/helpers.py b/tests/integration/helpers.py index 5a6152b..c9bc3ee 100644 --- a/tests/integration/helpers.py +++ b/tests/integration/helpers.py @@ -50,14 +50,14 @@ class NetworkExpectations: specific network elements, and blueprint expansion results. Attributes: - node_count: Expected total number of nodes in the final network + count: Expected total number of nodes in the final network edge_count: Expected total number of directed edges (links * 2 for bidirectional) specific_nodes: Set of specific node names that must be present specific_links: List of (source, target) tuples that must exist as links blueprint_expansions: Dict mapping blueprint paths to expected node counts """ - node_count: int + count: int edge_count: int specific_nodes: Optional[Set[str]] = None specific_links: Optional[List[Tuple[str, str]]] = None @@ -152,8 +152,8 @@ def validate_network_structure(self, expectations: NetworkExpectations) -> None: # Validate node count with detailed context actual_nodes = len(self.graph.nodes) - assert actual_nodes == expectations.node_count, ( - f"Network node count mismatch: expected {expectations.node_count}, " + assert actual_nodes == expectations.count, ( + f"Network node count mismatch: expected {expectations.count}, " f"found {actual_nodes}. " f"Graph nodes: {sorted(list(self.graph.nodes)[:10])}{'...' if actual_nodes > 10 else ''}" ) @@ -239,12 +239,12 @@ def validate_traffic_demands(self, expected_count: int) -> None: Raises: AssertionError: If traffic demand count doesn't match expectations """ - default_demands = self.scenario.traffic_matrix_set.get_default_matrix() + default_demands = self.scenario.demand_set.get_default_set() actual_count = len(default_demands) assert actual_count == expected_count, ( f"Traffic demand count mismatch: expected {expected_count}, found {actual_count}. " - f"Demands: {[(d.source, d.sink, d.demand) for d in default_demands[:5]]}" + f"Demands: {[(d.source, d.target, d.volume) for d in default_demands[:5]]}" f"{'...' 
if actual_count > 5 else ''}" ) @@ -283,7 +283,7 @@ def validate_failure_policy( # Validate rule scopes if specified if expected_scopes: actual_scopes = [ - rule.entity_scope + rule.scope for mode in getattr(policy, "modes", []) for rule in mode.rules ] @@ -382,15 +382,7 @@ def validate_flow_results( """ exported = self.scenario.results.to_dict() step_data = exported.get("steps", {}).get(step_name, {}).get("data", {}) - # Prefer direct key actual_flow = step_data.get(flow_label) - # Fallback: if flow_results list present, try summary.total_placed - if actual_flow is None and flow_label == "total_placed": - flow_results = step_data.get("flow_results", []) - if flow_results: - actual_flow = float( - flow_results[0].get("summary", {}).get("total_placed", 0.0) - ) assert actual_flow is not None, ( f"Flow result '{flow_label}' not found for step '{step_name}'" ) @@ -531,8 +523,8 @@ def __init__(self) -> None: """Initialize empty scenario data with basic structure.""" self.data: Dict[str, Any] = { "network": {}, - "failure_policy_set": {}, - "traffic_matrix_set": {}, + "failures": {}, + "demands": {}, "workflow": [], } @@ -586,7 +578,8 @@ def with_simple_links( { "source": source, "target": target, - "link_params": {"capacity": capacity, "cost": DEFAULT_LINK_COST}, + "capacity": capacity, + "cost": DEFAULT_LINK_COST, } ) return self @@ -610,25 +603,25 @@ def with_blueprint( return self def with_traffic_demand( - self, source: str, sink: str, demand: float, matrix_name: str = "default" + self, source: str, target: str, volume: float, demand_set: str = "default" ) -> "ScenarioDataBuilder": """ Add a traffic demand to the specified traffic matrix. Args: source: Source node/pattern for traffic demand - sink: Sink node/pattern for traffic demand - demand: Traffic demand value - matrix_name: Name of traffic matrix (default: "default") + target: Target node/pattern for traffic demand + volume: Traffic demand volume + demand_set: Name of traffic matrix (default: "default") Returns: Self for method chaining """ - if matrix_name not in self.data["traffic_matrix_set"]: - self.data["traffic_matrix_set"][matrix_name] = [] + if demand_set not in self.data["demands"]: + self.data["demands"][demand_set] = [] - self.data["traffic_matrix_set"][matrix_name].append( - {"source": source, "sink": sink, "demand": demand} + self.data["demands"][demand_set].append( + {"source": source, "target": target, "volume": volume} ) return self @@ -646,24 +639,24 @@ def with_failure_policy( Returns: Self for method chaining """ - self.data["failure_policy_set"][policy_name] = policy_data + self.data["failures"][policy_name] = policy_data return self def with_workflow_step( - self, step_type: str, name: str, **kwargs + self, type: str, name: str, **kwargs ) -> "ScenarioDataBuilder": """ Add a workflow step to the scenario execution plan. 
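+        Steps run in the order added; build_yaml() prepends a BuildGraph step
+        when the workflow lacks one.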
Args: - step_type: Type of workflow step (e.g., "BuildGraph", "CapacityEnvelopeAnalysis") + type: Type of workflow step (e.g., "BuildGraph", "CapacityEnvelopeAnalysis") name: Unique name for this step instance **kwargs: Additional step-specific parameters Returns: Self for method chaining """ - step_data = {"step_type": step_type, "name": name} + step_data = {"type": type, "name": name} step_data.update(kwargs) self.data["workflow"].append(step_data) return self @@ -683,9 +676,9 @@ def build_yaml(self) -> str: # Ensure BuildGraph workflow step is included if workflow exists but lacks one workflow_steps = self.data.get("workflow", []) if workflow_steps and not any( - step.get("step_type") == "BuildGraph" for step in workflow_steps + step.get("type") == "BuildGraph" for step in workflow_steps ): - workflow_steps.insert(0, {"step_type": "BuildGraph", "name": "build_graph"}) + workflow_steps.insert(0, {"type": "BuildGraph", "name": "build_graph"}) self.data["workflow"] = workflow_steps return yaml.dump(self.data, default_flow_style=False) @@ -784,7 +777,7 @@ def basic_failure_scenario() -> Scenario: "single_link_failure", { "attrs": {"description": "Single link failure"}, - "rules": [{"entity_scope": "link", "rule_type": "choice", "count": 1}], + "rules": [{"scope": "link", "mode": "choice", "count": 1}], }, ) .with_workflow_step("BuildGraph", "build_graph") diff --git a/tests/integration/scenario_1.yaml b/tests/integration/scenario_1.yaml index 75f6261..ea17ab4 100644 --- a/tests/integration/scenario_1.yaml +++ b/tests/integration/scenario_1.yaml @@ -30,113 +30,103 @@ network: # West -> Middle - source: SEA target: DEN - link_params: - capacity: 200 - cost: 6846 - attrs: - distance_km: 1369.13 + capacity: 200 + cost: 6846 + attrs: + distance_km: 1369.13 - source: SFO target: DEN - link_params: - capacity: 200 - cost: 7754 - attrs: - distance_km: 1550.77 + capacity: 200 + cost: 7754 + attrs: + distance_km: 1550.77 - source: SEA target: DFW - link_params: - capacity: 200 - cost: 9600 - attrs: - distance_km: 1920 + capacity: 200 + cost: 9600 + attrs: + distance_km: 1920 - source: SFO target: DFW - link_params: - capacity: 200 - cost: 10000 - attrs: - distance_km: 2000 + capacity: 200 + cost: 10000 + attrs: + distance_km: 2000 # Middle <-> Middle (two parallel links to represent redundancy) - source: DEN target: DFW - link_params: - capacity: 400 - cost: 7102 - attrs: - distance_km: 1420.28 + capacity: 400 + cost: 7102 + attrs: + distance_km: 1420.28 - source: DEN target: DFW - link_params: - capacity: 400 - cost: 7102 - attrs: - distance_km: 1420.28 + capacity: 400 + cost: 7102 + attrs: + distance_km: 1420.28 # Middle -> East - source: DEN target: JFK - link_params: - capacity: 200 - cost: 7500 - attrs: - distance_km: 1500 + capacity: 200 + cost: 7500 + attrs: + distance_km: 1500 - source: DFW target: DCA - link_params: - capacity: 200 - cost: 8000 - attrs: - distance_km: 1600 + capacity: 200 + cost: 8000 + attrs: + distance_km: 1600 - source: DFW target: JFK - link_params: - capacity: 200 - cost: 9500 - attrs: - distance_km: 1900 + capacity: 200 + cost: 9500 + attrs: + distance_km: 1900 # East <-> East - source: JFK target: DCA - link_params: - capacity: 100 - cost: 1714 - attrs: - distance_km: 342.69 + capacity: 100 + cost: 1714 + attrs: + distance_km: 342.69 -failure_policy_set: +failures: default: attrs: description: "Evaluate traffic routing under any single link failure." 
modes: - weight: 1.0 rules: - - entity_scope: "link" - rule_type: "choice" + - scope: "link" + mode: "choice" count: 1 -traffic_matrix_set: +demands: default: - source: SEA - sink: JFK - demand: 50 + target: JFK + volume: 50 - source: SFO - sink: DCA - demand: 50 + target: DCA + volume: 50 - source: SEA - sink: DCA - demand: 50 + target: DCA + volume: 50 - source: SFO - sink: JFK - demand: 50 + target: JFK + volume: 50 workflow: - - step_type: BuildGraph + - type: BuildGraph name: build_graph diff --git a/tests/integration/scenario_2.yaml b/tests/integration/scenario_2.yaml index ac3b22b..762585b 100644 --- a/tests/integration/scenario_2.yaml +++ b/tests/integration/scenario_2.yaml @@ -19,198 +19,184 @@ blueprints: # A "blueprint" describes a reusable fragment of topology (e.g., a pattern). # It can be referenced in other blueprints or in the main 'network'. clos_2tier: - groups: + nodes: leaf: - node_count: 4 - name_template: leaf-{node_num} + count: 4 + template: leaf-{n} spine: - node_count: 4 - name_template: spine-{node_num} + count: 4 + template: spine-{n} - adjacency: + links: - source: /leaf target: /spine pattern: mesh - link_params: - capacity: 100 - cost: 1000 + capacity: 100 + cost: 1000 # Another blueprint referencing 'clos_2tier' as a sub-topology. city_cloud: - groups: + nodes: clos_instance: # Uses the 'clos_2tier' blueprint but overrides some parameters. - use_blueprint: clos_2tier - parameters: + blueprint: clos_2tier + params: # Override: more spine nodes and custom naming convention - spine.node_count: 6 - spine.name_template: "myspine-{node_num}" + spine.count: 6 + spine.template: "myspine-{n}" edge_nodes: - node_count: 4 - name_template: edge-{node_num} + count: 4 + template: edge-{n} - adjacency: + links: - source: /clos_instance/leaf target: /edge_nodes pattern: mesh - link_params: - capacity: 100 - cost: 1000 + capacity: 100 + cost: 1000 # A minimal blueprint representing a single node. single_node: - groups: + nodes: single: - node_count: 1 - name_template: single-{node_num} + count: 1 + template: single-{n} # -- Main network definition -- network: name: "6-node-l3-us-backbone" version: 1.1 - groups: + nodes: # 1) 'SEA' references the 'city_cloud' blueprint. This creates subgroups # "SEA/clos_instance" and "SEA/edge_nodes" in the global scope. SEA: - use_blueprint: city_cloud + blueprint: city_cloud attrs: coords: [47.6062, -122.3321] # 2) 'SFO' references 'single_node' (one-node blueprint). SFO: - use_blueprint: single_node + blueprint: single_node attrs: coords: [37.7749, -122.4194] - adjacency: + # Standalone nodes + DEN: + attrs: + coords: [39.7392, -104.9903] + DFW: + attrs: + coords: [32.8998, -97.0403] + JFK: + attrs: + coords: [40.641766, -73.780968] + DCA: + attrs: + coords: [38.907192, -77.036871] + + links: # Each adjacency definition uses "mesh" in this scenario. Self-loops are automatically skipped. 
- source: /SFO target: /DEN pattern: mesh - link_params: - capacity: 100 - cost: 7754 - attrs: - distance_km: 1550.77 + capacity: 100 + cost: 7754 + attrs: + distance_km: 1550.77 - source: /SFO target: /DFW pattern: mesh - link_params: - capacity: 200 - cost: 10000 - attrs: - distance_km: 2000 + capacity: 200 + cost: 10000 + attrs: + distance_km: 2000 - source: /SEA/edge_nodes target: /DEN pattern: mesh - link_params: - capacity: 100 - cost: 6846 - attrs: - distance_km: 1369.13 + capacity: 100 + cost: 6846 + attrs: + distance_km: 1369.13 - source: /SEA/edge_nodes target: /DFW pattern: mesh - link_params: - capacity: 100 - cost: 9600 - attrs: - distance_km: 1920 - - # Standalone nodes - nodes: - DEN: - attrs: - coords: [39.7392, -104.9903] - DFW: - attrs: - coords: [32.8998, -97.0403] - JFK: - attrs: - coords: [40.641766, -73.780968] - DCA: + capacity: 100 + cost: 9600 attrs: - coords: [38.907192, -77.036871] + distance_km: 1920 - # Additional direct links - # Note that each link references existing nodes (e.g., DEN, DFW). - # If multiple links exist between the same two nodes, they have unique IDs - # generated by the code, but share the same source/target. - links: + # Additional direct links + # Note that each link references existing nodes (e.g., DEN, DFW). + # If multiple links exist between the same two nodes, they have unique IDs + # generated by the code, but share the same source/target. - source: DEN target: DFW - link_params: - capacity: 400 - cost: 7102 - attrs: - distance_km: 1420.28 + capacity: 400 + cost: 7102 + attrs: + distance_km: 1420.28 - source: DEN target: DFW - link_params: - capacity: 400 - cost: 7102 - attrs: - distance_km: 1420.28 + capacity: 400 + cost: 7102 + attrs: + distance_km: 1420.28 - source: DEN target: JFK - link_params: - capacity: 200 - cost: 7500 - attrs: - distance_km: 1500 + capacity: 200 + cost: 7500 + attrs: + distance_km: 1500 - source: DFW target: DCA - link_params: - capacity: 200 - cost: 8000 - attrs: - distance_km: 1600 + capacity: 200 + cost: 8000 + attrs: + distance_km: 1600 - source: DFW target: JFK - link_params: - capacity: 200 - cost: 9500 - attrs: - distance_km: 1900 + capacity: 200 + cost: 9500 + attrs: + distance_km: 1900 - source: JFK target: DCA - link_params: - capacity: 100 - cost: 1714 - attrs: - distance_km: 342.69 + capacity: 100 + cost: 1714 + attrs: + distance_km: 342.69 -failure_policy_set: +failures: default: attrs: description: "Evaluate traffic routing under any single link failure." 
modes: - weight: 1.0 rules: - - entity_scope: "link" - rule_type: "choice" + - scope: "link" + mode: "choice" count: 1 -traffic_matrix_set: +demands: default: - source: SEA - sink: JFK - demand: 50 + target: JFK + volume: 50 - source: SFO - sink: DCA - demand: 50 + target: DCA + volume: 50 - source: SEA - sink: DCA - demand: 50 + target: DCA + volume: 50 - source: SFO - sink: JFK - demand: 50 + target: JFK + volume: 50 workflow: - - step_type: BuildGraph + - type: BuildGraph name: build_graph diff --git a/tests/integration/scenario_3.yaml b/tests/integration/scenario_3.yaml index 380fb42..d550e3a 100644 --- a/tests/integration/scenario_3.yaml +++ b/tests/integration/scenario_3.yaml @@ -4,90 +4,84 @@ seed: 3003 blueprints: brick_2tier: - groups: + nodes: t1: - node_count: 4 - name_template: t1-{node_num} + count: 4 + template: t1-{n} t2: - node_count: 4 - name_template: t2-{node_num} + count: 4 + template: t2-{n} - adjacency: + links: - source: /t1 target: /t2 pattern: mesh - link_params: - capacity: 100.0 # 100 Gb/s tier1-tier2 links - cost: 1 + capacity: 100.0 # 100 Gb/s tier1-tier2 links + cost: 1 3tier_clos: - groups: + nodes: b1: - use_blueprint: brick_2tier + blueprint: brick_2tier b2: - use_blueprint: brick_2tier + blueprint: brick_2tier spine: - node_count: 16 - name_template: t3-{node_num} + count: 16 + template: t3-{n} - adjacency: + links: - source: b1/t2 target: spine pattern: one_to_one - link_params: - capacity: 400.0 # 400 Gb/s tier2-spine links - cost: 1 + capacity: 400.0 # 400 Gb/s tier2-spine links + cost: 1 - source: b2/t2 target: spine pattern: one_to_one - link_params: - capacity: 400.0 # 400 Gb/s tier2-spine links - cost: 1 + capacity: 400.0 # 400 Gb/s tier2-spine links + cost: 1 network: name: "3tier_clos_network" version: 1.0 - groups: + nodes: my_clos1: - use_blueprint: 3tier_clos + blueprint: 3tier_clos my_clos2: - use_blueprint: 3tier_clos + blueprint: 3tier_clos - adjacency: + links: - source: my_clos1/spine target: my_clos2/spine pattern: one_to_one - link_params: - capacity: 400.0 # 400 Gb/s inter-Clos spine links - cost: 1 + capacity: 400.0 # 400 Gb/s inter-Clos spine links + cost: 1 - link_overrides: + link_rules: # Overriding a link between two spine devices. 
- source: my_clos1/spine/t3-1$ target: my_clos2/spine/t3-1$ - link_params: - capacity: 200.0 # Override capacity to 200 Gb/s - cost: 1 + capacity: 200.0 # Override capacity to 200 Gb/s + cost: 1 # Set fiber conduit risk groups and hw_component on all spine to spine links # These inter-clos links traverse a fiber conduit between buildings - source: .*/spine/.* target: .*/spine/.* - any_direction: True - link_params: - risk_groups: ["Conduit_InterClos_C1"] - attrs: - fiber: - conduit_id: "InterClos-C1" - path_id: "Clos1-Clos2" - hardware: - source: {component: "400G-LR4", count: 1} - target: {component: "400G-LR4", count: 1} + bidirectional: True + risk_groups: ["Conduit_InterClos_C1"] + attrs: + fiber: + conduit_id: "InterClos-C1" + path_id: "Clos1-Clos2" + hardware: + source: {component: "400G-LR4", count: 1} + target: {component: "400G-LR4", count: 1} # Node overrides for facility-based risk groups - node_overrides: + node_rules: - path: my_clos1/b1/t1 risk_groups: ["PowerZone_Clos1_B1_PZA"] attrs: @@ -163,14 +157,14 @@ risk_groups: room_id: "Spine" workflow: - - step_type: BuildGraph + - type: BuildGraph name: build_graph # Forward direction analysis - equivalent to capacity_probe - - step_type: MaxFlow + - type: MaxFlow name: capacity_analysis_forward source: my_clos1/b.*/t1 - sink: my_clos2/b.*/t1 + target: my_clos2/b.*/t1 mode: combine shortest_path: true flow_placement: PROPORTIONAL @@ -178,10 +172,10 @@ workflow: failure_policy: null # Reverse direction analysis - equivalent to capacity_probe with probe_reverse - - step_type: MaxFlow + - type: MaxFlow name: capacity_analysis_reverse source: my_clos2/b.*/t1 - sink: my_clos1/b.*/t1 + target: my_clos1/b.*/t1 mode: combine shortest_path: true flow_placement: PROPORTIONAL @@ -189,10 +183,10 @@ workflow: failure_policy: null # Forward direction with EQUAL_BALANCED - equivalent to capacity_probe2 - - step_type: MaxFlow + - type: MaxFlow name: capacity_analysis_forward_balanced source: my_clos1/b.*/t1 - sink: my_clos2/b.*/t1 + target: my_clos2/b.*/t1 mode: combine shortest_path: true flow_placement: EQUAL_BALANCED @@ -200,10 +194,10 @@ workflow: failure_policy: null # Reverse direction with EQUAL_BALANCED - equivalent to capacity_probe2 with probe_reverse - - step_type: MaxFlow + - type: MaxFlow name: capacity_analysis_reverse_balanced source: my_clos2/b.*/t1 - sink: my_clos1/b.*/t1 + target: my_clos1/b.*/t1 mode: combine shortest_path: true flow_placement: EQUAL_BALANCED diff --git a/tests/integration/scenario_4.yaml b/tests/integration/scenario_4.yaml index 66b7831..6c51cca 100644 --- a/tests/integration/scenario_4.yaml +++ b/tests/integration/scenario_4.yaml @@ -153,10 +153,10 @@ risk_groups: blueprints: # Basic server rack with ToR switch server_rack: - groups: + nodes: tor: - node_count: 1 - name_template: "tor-{node_num}" + count: 1 + template: "tor-{n}" attrs: hardware: component: "ToRSwitch48p" @@ -164,30 +164,29 @@ blueprints: role: "top_of_rack" risk_groups: ["CoolingZone_DC1_R1_CZA"] servers: - node_count: 8 # 8 servers per rack for test efficiency - name_template: "srv-{node_num}" + count: 8 # 8 servers per rack for test efficiency + template: "srv-{n}" attrs: hardware: component: "ServerNode" count: 1 role: "compute" risk_groups: ["CoolingZone_DC1_R1_CZA"] - adjacency: + links: - source: /servers target: /tor pattern: "one_to_one" - link_params: - capacity: 25.0 # 25 Gb/s server uplinks - cost: 1 - attrs: - media_type: "copper" + capacity: 25.0 # 25 Gb/s server uplinks + cost: 1 + attrs: + media_type: "copper" # Spine-leaf 
fabric with variable expansion leaf_spine_fabric: - groups: + nodes: leaf: - node_count: 2 # 2 leaf switches per fabric - name_template: "leaf-{node_num}" + count: 2 # 2 leaf switches per fabric + template: "leaf-{n}" attrs: hardware: component: "ToRSwitch48p" @@ -195,39 +194,39 @@ blueprints: role: "leaf" # Leaf switches in DC1 and DC2 have their own power zones spine: - node_count: 2 # 2 spine switches per fabric - name_template: "spine-{node_num}" + count: 2 # 2 spine switches per fabric + template: "spine-{n}" attrs: hardware: component: "SpineSwitch32p" count: 1 role: "spine" # Spine switches share room-level risk (cooling/power in spine room) - adjacency: + links: # Variable expansion for leaf-spine connectivity using $var syntax - source: "leaf-${leaf_id}" target: "spine-${spine_id}" - expand_vars: - leaf_id: [1, 2] - spine_id: [1, 2] - expansion_mode: "cartesian" + expand: + vars: + leaf_id: [1, 2] + spine_id: [1, 2] + mode: "cartesian" pattern: "mesh" - link_params: - capacity: 400.0 # 400 Gb/s leaf-spine links - cost: 1 - attrs: - media_type: "fiber" - link_type: "leaf_spine" + capacity: 400.0 # 400 Gb/s leaf-spine links + cost: 1 + attrs: + media_type: "fiber" + link_type: "leaf_spine" network: name: "Advanced DSL Demonstration" version: "2.0" - groups: + nodes: # Multi-datacenter pod and rack expansion # Each rack group inherits building-level power zone risk dc[1-2]_pod[a,b]_rack[1-2]: - use_blueprint: server_rack + blueprint: server_rack attrs: datacenter: "dc1" pod: "poda" @@ -237,45 +236,44 @@ network: # Fabric per DC using bracket expansion # Fabric equipment shares building-level risk dc[1-2]_fabric: - use_blueprint: leaf_spine_fabric + blueprint: leaf_spine_fabric attrs: datacenter: "dc1" risk_groups: ["Building_DC1"] # Top-level adjacency with variable expansion using $var syntax - adjacency: + links: # Connect racks to fabric using variable expansion - source: "dc${dc}_pod${pod}_rack${rack}/tor" target: "dc${dc}_fabric/leaf" - expand_vars: - dc: [1, 2] - pod: ["a", "b"] - rack: [1, 2] - expansion_mode: "cartesian" + expand: + vars: + dc: [1, 2] + pod: ["a", "b"] + rack: [1, 2] + mode: "cartesian" pattern: "one_to_one" - link_params: - capacity: 100.0 # 100 Gb/s rack-to-fabric uplinks - cost: 2 - attrs: - connection_type: "rack_to_fabric" + capacity: 100.0 # 100 Gb/s rack-to-fabric uplinks + cost: 2 + attrs: + connection_type: "rack_to_fabric" # Inter-DC spine connectivity - links traverse fiber conduit between buildings - source: "dc1_fabric/spine" target: "dc2_fabric/spine" pattern: "mesh" - link_params: - capacity: 400.0 # 400 Gb/s inter-DC links - cost: 10 - risk_groups: ["Conduit_DC1_DC2_C1"] - attrs: - connection_type: "inter_dc" - fiber: - path_id: "DC1-DC2" - conduit_id: "DC1-DC2-C1" - distance_km: 50 + capacity: 400.0 # 400 Gb/s inter-DC links + cost: 10 + risk_groups: ["Conduit_DC1_DC2_C1"] + attrs: + connection_type: "inter_dc" + fiber: + path_id: "DC1-DC2" + conduit_id: "DC1-DC2-C1" + distance_km: 50 # Complex node overrides with regex patterns - node_overrides: + node_rules: # Override DC1 spine switches - assign to DC1 spine room - path: "dc1_fabric/spine/spine-[1-2]" attrs: @@ -324,44 +322,42 @@ network: maintenance_status: "scheduled" # Complex link overrides - link_overrides: + link_rules: # Higher capacity for inter-DC links - both conduit and path level risk - source: "dc1_fabric/spine/.*" target: "dc2_fabric/spine/.*" - any_direction: true - link_params: - capacity: 800.0 # 800 Gb/s inter-DC links - cost: 5 - risk_groups: ["Conduit_DC1_DC2_C1", 
"Path_DC1_DC2"] - attrs: - link_class: "inter_dc" - encryption: "enabled" - fiber: - path_id: "DC1-DC2" - conduit_id: "DC1-DC2-C1" + bidirectional: true + capacity: 800.0 # 800 Gb/s inter-DC links + cost: 5 + risk_groups: ["Conduit_DC1_DC2_C1", "Path_DC1_DC2"] + attrs: + link_class: "inter_dc" + encryption: "enabled" + fiber: + path_id: "DC1-DC2" + conduit_id: "DC1-DC2-C1" # Higher capacity uplinks for specific racks - source: "dc1_pod[ab]_rack1/tor/.*" target: "dc1_fabric/leaf/.*" - link_params: - capacity: 200.0 # 200 Gb/s uplinks - cost: 1 + capacity: 200.0 # 200 Gb/s uplinks + cost: 1 # Traffic patterns for realistic workloads -traffic_matrix_set: +demands: default: # East-west traffic within DC - source: "dc1_pod[ab]_rack.*/servers/.*" - sink: "dc1_pod[ab]_rack.*/servers/.*" - demand: 5.0 # 5 Gb/s east-west traffic + target: "dc1_pod[ab]_rack.*/servers/.*" + volume: 5.0 # 5 Gb/s east-west traffic mode: "pairwise" attrs: traffic_type: "east_west" # North-south traffic to external - source: "dc1_.*servers/.*" - sink: "dc2_.*servers/.*" - demand: 10.0 # 10 Gb/s inter-DC traffic + target: "dc2_.*servers/.*" + volume: 10.0 # 10 Gb/s inter-DC traffic mode: "combine" attrs: traffic_type: "inter_dc" @@ -369,22 +365,22 @@ traffic_matrix_set: # High-performance computing workload hpc_workload: - source: "dc1_poda_rack1/servers/srv-[1-4]" - sink: "dc1_poda_rack1/servers/srv-[1-4]" - demand: 20.0 # 20 Gb/s HPC collective communication + target: "dc1_poda_rack1/servers/srv-[1-4]" + volume: 20.0 # 20 Gb/s HPC collective communication mode: "pairwise" attrs: traffic_type: "hpc_collective" # Failure policies for realistic failure scenarios -failure_policy_set: +failures: single_link_failure: attrs: description: "Single link failure" modes: - weight: 1.0 rules: - - entity_scope: "link" - rule_type: "choice" + - scope: "link" + mode: "choice" count: 1 single_node_failure: @@ -393,8 +389,8 @@ failure_policy_set: modes: - weight: 1.0 rules: - - entity_scope: "node" - rule_type: "choice" + - scope: "node" + mode: "choice" count: 1 default: @@ -403,21 +399,21 @@ failure_policy_set: modes: - weight: 1.0 rules: - - entity_scope: "link" - rule_type: "choice" + - scope: "link" + mode: "choice" count: 1 # Multi-step workflow demonstrating various workflow steps workflow: - - step_type: BuildGraph + - type: BuildGraph name: build_graph # Capacity analysis with different traffic patterns # Forward intra-DC capacity analysis - - step_type: MaxFlow + - type: MaxFlow name: intra_dc_capacity_forward source: "dc1_pod[ab]_rack.*/servers/.*" - sink: "dc1_pod[ab]_rack.*/servers/.*" + target: "dc1_pod[ab]_rack.*/servers/.*" mode: "combine" shortest_path: false flow_placement: "PROPORTIONAL" @@ -425,10 +421,10 @@ workflow: failure_policy: null # Reverse intra-DC capacity analysis - - step_type: MaxFlow + - type: MaxFlow name: intra_dc_capacity_reverse source: "dc1_pod[ab]_rack.*/servers/.*" - sink: "dc1_pod[ab]_rack.*/servers/.*" + target: "dc1_pod[ab]_rack.*/servers/.*" mode: "combine" shortest_path: false flow_placement: "PROPORTIONAL" @@ -436,10 +432,10 @@ workflow: failure_policy: null # Forward inter-DC capacity analysis - - step_type: MaxFlow + - type: MaxFlow name: inter_dc_capacity_forward source: "dc1_.*servers/.*" - sink: "dc2_.*servers/.*" + target: "dc2_.*servers/.*" mode: "combine" shortest_path: false flow_placement: "EQUAL_BALANCED" @@ -447,10 +443,10 @@ workflow: failure_policy: null # Reverse inter-DC capacity analysis - - step_type: MaxFlow + - type: MaxFlow name: inter_dc_capacity_reverse source: 
"dc2_.*servers/.*" - sink: "dc1_.*servers/.*" + target: "dc1_.*servers/.*" mode: "combine" shortest_path: false flow_placement: "EQUAL_BALANCED" @@ -458,10 +454,10 @@ workflow: failure_policy: null # Failure analysis with different policies - - step_type: MaxFlow + - type: MaxFlow name: rack_failure_analysis source: "dc1_pod[ab]_rack.*/servers/.*" - sink: "dc1_pod[ab]_rack.*/servers/.*" + target: "dc1_pod[ab]_rack.*/servers/.*" mode: "combine" failure_policy: "single_link_failure" iterations: 10 # 10 iterations for test efficiency @@ -469,10 +465,10 @@ workflow: shortest_path: false flow_placement: "PROPORTIONAL" - - step_type: MaxFlow + - type: MaxFlow name: spine_failure_analysis source: "dc1_.*servers/.*" - sink: "dc2_.*servers/.*" + target: "dc2_.*servers/.*" mode: "combine" failure_policy: "single_node_failure" iterations: 20 # 20 iterations for test efficiency diff --git a/tests/integration/test_data_templates.py b/tests/integration/test_data_templates.py index 225bc22..6a8beb8 100644 --- a/tests/integration/test_data_templates.py +++ b/tests/integration/test_data_templates.py @@ -59,7 +59,8 @@ def linear_network( { "source": node_names[i], "target": node_names[i + 1], - "link_params": {"capacity": link_capacity, "cost": 1}, + "capacity": link_capacity, + "cost": 1, } ) @@ -78,7 +79,8 @@ def star_network( { "source": center_node, "target": leaf, - "link_params": {"capacity": link_capacity, "cost": 1}, + "capacity": link_capacity, + "cost": 1, } ) @@ -98,7 +100,8 @@ def mesh_network( { "source": source, "target": target, - "link_params": {"capacity": link_capacity, "cost": 1}, + "capacity": link_capacity, + "cost": 1, } ) @@ -117,7 +120,8 @@ def ring_network( { "source": node_names[i], "target": node_names[next_i], - "link_params": {"capacity": link_capacity, "cost": 1}, + "capacity": link_capacity, + "cost": 1, } ) @@ -150,7 +154,8 @@ def tree_network( { "source": parent_name, "target": child_name, - "link_params": {"capacity": link_capacity, "cost": 1}, + "capacity": link_capacity, + "cost": 1, } ) @@ -165,17 +170,13 @@ class BlueprintTemplates: @staticmethod def simple_group_blueprint( - group_name: str, node_count: int, name_template: Optional[str] = None + group_name: str, count: int, template: Optional[str] = None ) -> Dict[str, Any]: """Create a simple blueprint with one group of nodes.""" - if name_template is None: - name_template = f"{group_name}-{{node_num}}" + if template is None: + template = f"{group_name}-{{n}}" - return { - "groups": { - group_name: {"node_count": node_count, "name_template": name_template} - } - } + return {"nodes": {group_name: {"count": count, "template": template}}} @staticmethod def two_tier_blueprint( @@ -186,16 +187,17 @@ def two_tier_blueprint( ) -> Dict[str, Any]: """Create a two-tier blueprint (leaf-spine pattern).""" return { - "groups": { - "tier1": {"node_count": tier1_count, "name_template": "t1-{node_num}"}, - "tier2": {"node_count": tier2_count, "name_template": "t2-{node_num}"}, + "nodes": { + "tier1": {"count": tier1_count, "template": "t1-{n}"}, + "tier2": {"count": tier2_count, "template": "t2-{n}"}, }, - "adjacency": [ + "links": [ { "source": "/tier1", "target": "/tier2", "pattern": pattern, - "link_params": {"capacity": link_capacity, "cost": 1}, + "capacity": link_capacity, + "cost": 1, } ], } @@ -209,29 +211,31 @@ def three_tier_clos_blueprint( ) -> Dict[str, Any]: """Create a three-tier Clos blueprint.""" return { - "groups": { - "leaf": {"node_count": leaf_count, "name_template": "leaf-{node_num}"}, + "nodes": { + "leaf": 
{"count": leaf_count, "template": "leaf-{n}"}, "spine": { - "node_count": spine_count, - "name_template": "spine-{node_num}", + "count": spine_count, + "template": "spine-{n}", }, "super_spine": { - "node_count": super_spine_count, - "name_template": "ss-{node_num}", + "count": super_spine_count, + "template": "ss-{n}", }, }, - "adjacency": [ + "links": [ { "source": "/leaf", "target": "/spine", "pattern": "mesh", - "link_params": {"capacity": link_capacity, "cost": 1}, + "capacity": link_capacity, + "cost": 1, }, { "source": "/spine", "target": "/super_spine", "pattern": "mesh", - "link_params": {"capacity": link_capacity, "cost": 1}, + "capacity": link_capacity, + "cost": 1, }, ], } @@ -244,11 +248,11 @@ def nested_blueprint( ) -> Dict[str, Any]: """Create a blueprint that wraps another blueprint with additional components.""" blueprint_data = { - "groups": {wrapper_group_name: {"use_blueprint": inner_blueprint_name}} + "nodes": {wrapper_group_name: {"blueprint": inner_blueprint_name}} } if additional_groups: - blueprint_data["groups"].update(additional_groups) + blueprint_data["nodes"].update(additional_groups) return blueprint_data @@ -266,9 +270,7 @@ def single_link_failure() -> Dict[str, Any]: "modes": [ { "weight": 1.0, - "rules": [ - {"entity_scope": "link", "rule_type": "choice", "count": 1} - ], + "rules": [{"scope": "link", "mode": "choice", "count": 1}], } ], } @@ -283,27 +285,25 @@ def single_node_failure() -> Dict[str, Any]: "modes": [ { "weight": 1.0, - "rules": [ - {"entity_scope": "node", "rule_type": "choice", "count": 1} - ], + "rules": [{"scope": "node", "mode": "choice", "count": 1}], } ], } @staticmethod - def multiple_failure(entity_scope: str, count: int) -> Dict[str, Any]: + def multiple_failure(scope: str, count: int) -> Dict[str, Any]: """Template for multiple simultaneous failures.""" return { "attrs": { - "description": f"Multiple {entity_scope} failure scenario", + "description": f"Multiple {scope} failure scenario", }, "modes": [ { "weight": 1.0, "rules": [ { - "entity_scope": entity_scope, - "rule_type": "choice", + "scope": scope, + "mode": "choice", "count": count, } ], @@ -318,9 +318,7 @@ def all_links_failure() -> Dict[str, Any]: "attrs": { "description": "All links failure scenario", }, - "modes": [ - {"weight": 1.0, "rules": [{"entity_scope": "link", "rule_type": "all"}]} - ], + "modes": [{"weight": 1.0, "rules": [{"scope": "link", "mode": "all"}]}], } @staticmethod @@ -330,18 +328,18 @@ def risk_group_failure(risk_group_name: str) -> Dict[str, Any]: "attrs": { "description": f"Failure of risk group {risk_group_name}", }, - "fail_risk_groups": True, + "expand_groups": True, "modes": [ { "weight": 1.0, "rules": [ { - "entity_scope": "link", - "rule_type": "all", + "scope": "link", + "mode": "all", "conditions": [ { "attr": "risk_groups", - "operator": "contains", + "op": "contains", "value": risk_group_name, } ], @@ -362,13 +360,13 @@ def all_to_all_uniform( """Create uniform all-to-all traffic demands.""" demands = [] for source in node_names: - for sink in node_names: - if source != sink: # Skip self-demands + for target in node_names: + if source != target: # Skip self-demands demands.append( { "source": source, - "sink": sink, - "demand": demand_value, + "target": target, + "volume": demand_value, } ) return demands @@ -383,13 +381,13 @@ def star_traffic( # Traffic from leaves to center for leaf in leaf_nodes: demands.append( - {"source": leaf, "sink": center_node, "demand": demand_value} + {"source": leaf, "target": center_node, "volume": 
demand_value} ) # Traffic from center to leaves for leaf in leaf_nodes: demands.append( - {"source": center_node, "sink": leaf, "demand": demand_value} + {"source": center_node, "target": leaf, "volume": demand_value} ) return demands @@ -410,10 +408,10 @@ def random_demands( for _ in range(num_demands): source = random.choice(node_names) - sink = random.choice([n for n in node_names if n != source]) + target = random.choice([n for n in node_names if n != source]) demand_value = random.uniform(min_demand, max_demand) - demands.append({"source": source, "sink": sink, "demand": demand_value}) + demands.append({"source": source, "target": target, "volume": demand_value}) return demands @@ -433,20 +431,20 @@ def hotspot_traffic( demands.append( { "source": source, - "sink": hotspot, - "demand": hotspot_demand, + "target": hotspot, + "volume": hotspot_demand, } ) # Normal demand for other traffic for source in other_nodes: - for sink in other_nodes: - if source != sink: + for target in other_nodes: + if source != target: demands.append( { "source": source, - "sink": sink, - "demand": normal_demand, + "target": target, + "volume": normal_demand, } ) @@ -459,25 +457,25 @@ class WorkflowTemplates: @staticmethod def basic_build_workflow() -> List[Dict[str, Any]]: """Basic workflow that just builds the graph.""" - return [{"step_type": "BuildGraph", "name": "build_graph"}] + return [{"type": "BuildGraph", "name": "build_graph"}] @staticmethod def capacity_analysis_workflow( - source_pattern: str, sink_pattern: str, modes: Optional[List[str]] = None + source_pattern: str, target_pattern: str, modes: Optional[List[str]] = None ) -> List[Dict[str, Any]]: - """Workflow for capacity analysis between source and sink patterns.""" + """Workflow for capacity analysis between source and target patterns.""" if modes is None: modes = ["combine", "pairwise"] - workflow = [{"step_type": "BuildGraph", "name": "build_graph"}] + workflow = [{"type": "BuildGraph", "name": "build_graph"}] for i, mode in enumerate(modes): workflow.append( { - "step_type": "MaxFlow", + "type": "MaxFlow", "name": f"capacity_analysis_{i}", "source": source_pattern, - "sink": sink_pattern, + "target": target_pattern, "mode": mode, "iterations": 1, "failure_policy": None, @@ -489,16 +487,16 @@ def capacity_analysis_workflow( @staticmethod def failure_analysis_workflow( - source_pattern: str, sink_pattern: str, failure_policy_name: str = "default" + source_pattern: str, target_pattern: str ) -> List[Dict[str, Any]]: """Workflow for analyzing network under failures.""" return [ - {"step_type": "BuildGraph", "name": "build_graph"}, + {"type": "BuildGraph", "name": "build_graph"}, { - "step_type": "MaxFlow", + "type": "MaxFlow", "name": "failure_analysis", "source": source_pattern, - "sink": sink_pattern, + "target": target_pattern, "iterations": 100, "parallelism": 4, }, @@ -506,33 +504,33 @@ def failure_analysis_workflow( @staticmethod def comprehensive_analysis_workflow( - source_pattern: str, sink_pattern: str + source_pattern: str, target_pattern: str ) -> List[Dict[str, Any]]: """Comprehensive workflow with multiple analysis steps.""" return [ - {"step_type": "BuildGraph", "name": "build_graph"}, + {"type": "BuildGraph", "name": "build_graph"}, { - "step_type": "MaxFlow", + "type": "MaxFlow", "name": "capacity_analysis_combine", "source": source_pattern, - "sink": sink_pattern, + "target": target_pattern, "mode": "combine", "iterations": 1, }, { - "step_type": "MaxFlow", + "type": "MaxFlow", "name": "capacity_analysis_pairwise", "source": 
source_pattern, - "sink": sink_pattern, + "target": target_pattern, "mode": "pairwise", "shortest_path": True, "iterations": 1, }, { - "step_type": "MaxFlow", + "type": "MaxFlow", "name": "envelope_analysis", "source": source_pattern, - "sink": sink_pattern, + "target": target_pattern, "iterations": 50, }, ] @@ -596,11 +594,11 @@ def with_clos_fabric( # Add to network if "network" not in self.builder.data: self.builder.data["network"] = {"name": self.name, "version": self.version} - if "groups" not in self.builder.data["network"]: - self.builder.data["network"]["groups"] = {} + if "nodes" not in self.builder.data["network"]: + self.builder.data["network"]["nodes"] = {} - self.builder.data["network"]["groups"][fabric_name] = { - "use_blueprint": "clos_fabric" + self.builder.data["network"]["nodes"][fabric_name] = { + "blueprint": "clos_fabric" } return self @@ -611,19 +609,19 @@ def with_uniform_traffic( """Add uniform traffic demands between node patterns.""" demands = [] for source_pattern in node_patterns: - for sink_pattern in node_patterns: - if source_pattern != sink_pattern: + for target_pattern in node_patterns: + if source_pattern != target_pattern: demands.append( { "source": source_pattern, - "sink": sink_pattern, - "demand": demand_value, + "target": target_pattern, + "volume": demand_value, } ) - if "traffic_matrix_set" not in self.builder.data: - self.builder.data["traffic_matrix_set"] = {} - self.builder.data["traffic_matrix_set"]["default"] = demands + if "demands" not in self.builder.data: + self.builder.data["demands"] = {} + self.builder.data["demands"]["default"] = demands return self @@ -653,9 +651,9 @@ class CommonScenarios: """Pre-built scenario templates for common testing patterns.""" @staticmethod - def simple_linear_with_failures(node_count: int = 4) -> str: + def simple_linear_with_failures(count: int = 4) -> str: """Simple linear network with single link failure analysis.""" - nodes = [f"Node{i}" for i in range(1, node_count + 1)] + nodes = [f"Node{i}" for i in range(1, count + 1)] return ( ScenarioTemplateBuilder("simple_linear", "1.0") @@ -733,7 +731,8 @@ def missing_nodes_builder() -> ScenarioDataBuilder: { "source": "NodeA", "target": "NonexistentNode", - "link_params": {"capacity": 10, "cost": 1}, + "capacity": 10, + "cost": 1, } ] builder.with_workflow_step("BuildGraph", "build_graph") @@ -744,14 +743,14 @@ def circular_blueprint_builder() -> ScenarioDataBuilder: """Create scenario builder with circular blueprint references.""" builder = ScenarioDataBuilder() builder.with_blueprint( - "blueprint_a", {"groups": {"group_a": {"use_blueprint": "blueprint_b"}}} + "blueprint_a", {"nodes": {"group_a": {"blueprint": "blueprint_b"}}} ) builder.with_blueprint( - "blueprint_b", {"groups": {"group_b": {"use_blueprint": "blueprint_a"}}} + "blueprint_b", {"nodes": {"group_b": {"blueprint": "blueprint_a"}}} ) builder.data["network"] = { "name": "circular_test", - "groups": {"test_group": {"use_blueprint": "blueprint_a"}}, + "nodes": {"test_group": {"blueprint": "blueprint_a"}}, } builder.with_workflow_step("BuildGraph", "build_graph") return builder @@ -767,8 +766,8 @@ def invalid_failure_policy_builder() -> ScenarioDataBuilder: { "rules": [ { - "entity_scope": "invalid_scope", # Invalid scope - "rule_type": "choice", + "scope": "invalid_scope", # Invalid scope + "mode": "choice", "count": 1, } ] @@ -796,7 +795,7 @@ def missing_workflow_params_builder() -> ScenarioDataBuilder: # Add CapacityEnvelopeAnalysis without required parameters builder.data["workflow"] = [ { - 
"step_type": "CapacityEnvelopeAnalysis", + "type": "CapacityEnvelopeAnalysis", "name": "incomplete_analysis", # Missing source and sink } @@ -804,16 +803,16 @@ def missing_workflow_params_builder() -> ScenarioDataBuilder: return builder @staticmethod - def large_network_builder(node_count: int = 1000) -> ScenarioDataBuilder: + def large_network_builder(count: int = 1000) -> ScenarioDataBuilder: """Create scenario builder for stress testing with large networks.""" builder = ScenarioDataBuilder() # Create many nodes - node_names = [f"Node_{i:04d}" for i in range(node_count)] + node_names = [f"Node_{i:04d}" for i in range(count)] builder.with_simple_nodes(node_names) # Create star topology to avoid O(n²) mesh complexity - if node_count > 1: + if count > 1: center_node = node_names[0] leaf_nodes = node_names[1:] @@ -837,10 +836,10 @@ def deep_blueprint_nesting_builder(depth: int = 15) -> ScenarioDataBuilder: builder.with_blueprint( f"level_{i}", { - "groups": { - "nodes": { - "node_count": 1, - "name_template": f"level_{i}_node_{{node_num}}", + "nodes": { + "leaf": { + "count": 1, + "template": f"level_{i}_node_{{n}}", } } }, @@ -848,13 +847,13 @@ def deep_blueprint_nesting_builder(depth: int = 15) -> ScenarioDataBuilder: else: builder.with_blueprint( f"level_{i}", - {"groups": {"nested": {"use_blueprint": f"level_{i - 1}"}}}, + {"nodes": {"nested": {"blueprint": f"level_{i - 1}"}}}, ) # Use the deepest blueprint builder.data["network"] = { "name": "deep_nesting_test", - "groups": {"deep_group": {"use_blueprint": f"level_{depth - 1}"}}, + "nodes": {"deep_group": {"blueprint": f"level_{depth - 1}"}}, } builder.with_workflow_step("BuildGraph", "build_graph") return builder @@ -880,10 +879,10 @@ def single_node_builder(node_name: str = "LonelyNode") -> ScenarioDataBuilder: return builder @staticmethod - def isolated_nodes_builder(node_count: int = 5) -> ScenarioDataBuilder: + def isolated_nodes_builder(count: int = 5) -> ScenarioDataBuilder: """Create scenario builder with multiple isolated nodes.""" builder = ScenarioDataBuilder() - node_names = [f"Isolated_{i}" for i in range(node_count)] + node_names = [f"Isolated_{i}" for i in range(count)] builder.with_simple_nodes(node_names) # No links - all nodes isolated builder.with_workflow_step("BuildGraph", "build_graph") @@ -895,8 +894,8 @@ def zero_capacity_links_builder() -> ScenarioDataBuilder: builder = ScenarioDataBuilder() builder.with_simple_nodes(["A", "B", "C"]) builder.data["network"]["links"] = [ - {"source": "A", "target": "B", "link_params": {"capacity": 0, "cost": 1}}, - {"source": "B", "target": "C", "link_params": {"capacity": 0, "cost": 1}}, + {"source": "A", "target": "B", "capacity": 0, "cost": 1}, + {"source": "B", "target": "C", "capacity": 0, "cost": 1}, ] builder.with_workflow_step("BuildGraph", "build_graph") return builder @@ -910,10 +909,8 @@ def extreme_values_builder() -> ScenarioDataBuilder: { "source": "NodeA", "target": "NodeB", - "link_params": { - "capacity": 999999999999, # Very large capacity - "cost": 999999999999, # Very large cost - }, + "capacity": 999999999999, # Very large capacity + "cost": 999999999999, # Very large cost } ] builder.with_traffic_demand("NodeA", "NodeB", 888888888888.0) # Large demand @@ -942,9 +939,9 @@ def duplicate_links_builder() -> ScenarioDataBuilder: # Add multiple links with different parameters builder.data["network"]["links"] = [ - {"source": "A", "target": "B", "link_params": {"capacity": 10, "cost": 1}}, - {"source": "A", "target": "B", "link_params": {"capacity": 20, "cost": 
2}}, - {"source": "A", "target": "B", "link_params": {"capacity": 15, "cost": 3}}, + {"source": "A", "target": "B", "capacity": 10, "cost": 1}, + {"source": "A", "target": "B", "capacity": 20, "cost": 2}, + {"source": "A", "target": "B", "capacity": 15, "cost": 3}, ] builder.with_workflow_step("BuildGraph", "build_graph") return builder @@ -983,16 +980,17 @@ def large_mesh_blueprint_builder(side_size: int = 20) -> ScenarioDataBuilder: # Create large mesh blueprint large_mesh_blueprint = { - "groups": { - "side_a": {"node_count": side_size, "name_template": "a-{node_num}"}, - "side_b": {"node_count": side_size, "name_template": "b-{node_num}"}, + "nodes": { + "side_a": {"count": side_size, "template": "a-{n}"}, + "side_b": {"count": side_size, "template": "b-{n}"}, }, - "adjacency": [ + "links": [ { "source": "/side_a", "target": "/side_b", "pattern": "mesh", - "link_params": {"capacity": 1, "cost": 1}, + "capacity": 1, + "cost": 1, } ], } @@ -1000,7 +998,7 @@ def large_mesh_blueprint_builder(side_size: int = 20) -> ScenarioDataBuilder: builder.with_blueprint("large_mesh", large_mesh_blueprint) builder.data["network"] = { "name": "large_mesh_test", - "groups": {"mesh_group": {"use_blueprint": "large_mesh"}}, + "nodes": {"mesh_group": {"blueprint": "large_mesh"}}, } builder.with_workflow_step("BuildGraph", "build_graph") return builder @@ -1016,23 +1014,25 @@ def complex_multi_blueprint_builder() -> ScenarioDataBuilder: # Create aggregation layer agg_layer = { - "groups": { - "brick1": {"use_blueprint": "basic_brick"}, - "brick2": {"use_blueprint": "basic_brick"}, - "agg_spine": {"node_count": 8, "name_template": "agg-{node_num}"}, + "nodes": { + "brick1": {"blueprint": "basic_brick"}, + "brick2": {"blueprint": "basic_brick"}, + "agg_spine": {"count": 8, "template": "agg-{n}"}, }, - "adjacency": [ + "links": [ { "source": "brick1/tier2", "target": "agg_spine", "pattern": "mesh", - "link_params": {"capacity": 20, "cost": 1}, + "capacity": 20, + "cost": 1, }, { "source": "brick2/tier2", "target": "agg_spine", "pattern": "mesh", - "link_params": {"capacity": 20, "cost": 1}, + "capacity": 20, + "cost": 1, }, ], } @@ -1040,23 +1040,25 @@ def complex_multi_blueprint_builder() -> ScenarioDataBuilder: # Create core layer core_layer = { - "groups": { - "agg1": {"use_blueprint": "agg_layer"}, - "agg2": {"use_blueprint": "agg_layer"}, - "core_spine": {"node_count": 4, "name_template": "core-{node_num}"}, + "nodes": { + "agg1": {"blueprint": "agg_layer"}, + "agg2": {"blueprint": "agg_layer"}, + "core_spine": {"count": 4, "template": "core-{n}"}, }, - "adjacency": [ + "links": [ { "source": "agg1/agg_spine", "target": "core_spine", "pattern": "mesh", - "link_params": {"capacity": 40, "cost": 1}, + "capacity": 40, + "cost": 1, }, { "source": "agg2/agg_spine", "target": "core_spine", "pattern": "mesh", - "link_params": {"capacity": 40, "cost": 1}, + "capacity": 40, + "cost": 1, }, ], } @@ -1065,7 +1067,7 @@ def complex_multi_blueprint_builder() -> ScenarioDataBuilder: # Use in network builder.data["network"] = { "name": "complex_multi_blueprint", - "groups": {"datacenter": {"use_blueprint": "core_layer"}}, + "nodes": {"datacenter": {"blueprint": "core_layer"}}, } # Add capacity analysis workflow diff --git a/tests/integration/test_error_cases.py b/tests/integration/test_error_cases.py index 0398a9c..58a792e 100644 --- a/tests/integration/test_error_cases.py +++ b/tests/integration/test_error_cases.py @@ -49,8 +49,13 @@ def test_invalid_node_definitions(self): disabled: "not_a_boolean" # Should be boolean 
""" - # NetGraph rejects invalid keys during parsing - with pytest.raises(ValueError, match="Unrecognized key"): + # Schema validation now catches invalid keys + import jsonschema.exceptions + + with pytest.raises( + jsonschema.exceptions.ValidationError, + match="Additional properties are not allowed", + ): _scenario = Scenario.from_yaml(invalid_node_yaml) def test_invalid_link_definitions(self): @@ -59,7 +64,7 @@ def test_invalid_link_definitions(self): assert True def test_nonexistent_link_endpoints(self): - """Test links referencing nonexistent nodes.""" + """Test links referencing nonexistent nodes are silently skipped.""" # Use raw YAML since builder would validate node existence invalid_endpoints_yaml = """ network: @@ -68,17 +73,18 @@ def test_nonexistent_link_endpoints(self): links: - source: NodeA target: NonexistentNode # Node doesn't exist - link_params: - capacity: 10 - cost: 1 + capacity: 10 + cost: 1 workflow: - - step_type: BuildGraph + - type: BuildGraph name: build_graph """ - with pytest.raises((ValueError, KeyError)): - scenario = Scenario.from_yaml(invalid_endpoints_yaml) - scenario.run() + # Links to unknown nodes are silently skipped by mesh pattern + scenario = Scenario.from_yaml(invalid_endpoints_yaml) + scenario.run() + assert "NodeA" in scenario.network.nodes + assert len(scenario.network.links) == 0 # Link is silently skipped @pytest.mark.slow @@ -91,9 +97,9 @@ def test_nonexistent_blueprint_reference(self): invalid_blueprint_ref = """ network: name: "test_network" - groups: + nodes: test_group: - use_blueprint: nonexistent_blueprint # Doesn't exist + blueprint: nonexistent_blueprint # Doesn't exist """ with pytest.raises((ValueError, KeyError)): @@ -104,19 +110,19 @@ def test_circular_blueprint_references(self): """Test circular references between blueprints.""" builder = ScenarioDataBuilder() builder.with_blueprint( - "blueprint_a", {"groups": {"group_a": {"use_blueprint": "blueprint_b"}}} + "blueprint_a", {"nodes": {"group_a": {"blueprint": "blueprint_b"}}} ) builder.with_blueprint( "blueprint_b", { - "groups": {"group_b": {"use_blueprint": "blueprint_a"}} # Circular! + "nodes": {"group_b": {"blueprint": "blueprint_a"}} # Circular! 
}, ) # Add network using one of the circular blueprints builder.data["network"] = { "name": "test_network", - "groups": {"test_group": {"use_blueprint": "blueprint_a"}}, + "nodes": {"test_group": {"blueprint": "blueprint_a"}}, } builder.with_workflow_step("BuildGraph", "build_graph") @@ -130,33 +136,32 @@ def test_invalid_blueprint_parameters(self): assert True def test_malformed_adjacency_patterns(self): - """Test malformed adjacency pattern definitions.""" + """Test malformed link patterns.""" import jsonschema.exceptions # Use raw YAML for invalid pattern value that builder might validate malformed_adjacency = """ blueprints: bad_blueprint: - groups: + nodes: group1: - node_count: 2 - name_template: "node-{node_num}" + count: 2 + template: "node-{n}" group2: - node_count: 2 - name_template: "node-{node_num}" - adjacency: + count: 2 + template: "node-{n}" + links: - source: group1 target: group2 pattern: "invalid_pattern" # Should be 'mesh' or 'one_to_one' - link_params: - capacity: 10 + capacity: 10 network: - groups: + nodes: test_group: - use_blueprint: bad_blueprint + blueprint: bad_blueprint workflow: - - step_type: BuildGraph + - type: BuildGraph name: build_graph """ @@ -239,20 +244,18 @@ def test_self_loop_links(self): links: - source: NodeA target: NodeA # Self-loop - link_params: - capacity: 10 - cost: 1 + capacity: 10 + cost: 1 workflow: - - step_type: BuildGraph + - type: BuildGraph name: build_graph """ - # NetGraph correctly rejects self-loops as invalid - with pytest.raises( - ValueError, match="Link cannot have the same source and target" - ): - scenario = Scenario.from_yaml(self_loop_yaml) - scenario.run() + # NetGraph silently skips self-loops (mesh pattern behavior) + scenario = Scenario.from_yaml(self_loop_yaml) + scenario.run() + assert "NodeA" in scenario.network.nodes + assert len(scenario.network.links) == 0 # Self-loop is silently skipped def test_duplicate_links(self): """Test multiple links between the same pair of nodes.""" @@ -264,7 +267,8 @@ def test_duplicate_links(self): { "source": "NodeA", "target": "NodeB", - "link_params": {"capacity": 20.0, "cost": 2}, + "capacity": 20.0, + "cost": 2, } ) builder.with_workflow_step("BuildGraph", "build_graph") @@ -293,7 +297,8 @@ def test_zero_capacity_links(self): { "source": "NodeA", "target": "NodeB", - "link_params": {"capacity": 0, "cost": 1}, # Zero capacity + "capacity": 0, + "cost": 1, # Zero capacity } ] builder.with_workflow_step("BuildGraph", "build_graph") @@ -314,10 +319,8 @@ def test_very_large_network_parameters(self): { "source": "NodeA", "target": "NodeB", - "link_params": { - "capacity": 999999999999, # Very large capacity - "cost": 999999999999, # Very large cost - }, + "capacity": 999999999999, # Very large capacity + "cost": 999999999999, # Very large cost } ] builder.with_workflow_step("BuildGraph", "build_graph") diff --git a/tests/integration/test_scenario_1.py b/tests/integration/test_scenario_1.py index 38fbee7..81c0c47 100644 --- a/tests/integration/test_scenario_1.py +++ b/tests/integration/test_scenario_1.py @@ -134,11 +134,11 @@ def test_traffic_demands_configuration(self, helper): helper.validate_traffic_demands(expected_count=4) # Verify specific demands from the YAML - default_demands = helper.scenario.traffic_matrix_set.get_default_matrix() + default_demands = helper.scenario.demand_set.get_default_set() # Convert to a more testable format demands_dict = { - (demand.source, demand.sink): demand.demand for demand in default_demands + (demand.source, demand.target): demand.volume for 
demand in default_demands
         }
 
         expected_demands = {
@@ -169,8 +169,8 @@ def test_failure_policy_configuration(self, helper):
         rule = policy.modes[0].rules[0]
 
         assert rule.logic == "or", f"Expected rule logic 'or', found '{rule.logic}'"
-        assert rule.rule_type == "choice", (
-            f"Expected rule type 'choice', found '{rule.rule_type}'"
+        assert rule.mode == "choice", (
+            f"Expected rule mode 'choice', found '{rule.mode}'"
         )
         assert rule.count == 1, f"Expected rule count 1, found {rule.count}"
diff --git a/tests/integration/test_scenario_2.py b/tests/integration/test_scenario_2.py
index b9d5378..4a4ffd4 100644
--- a/tests/integration/test_scenario_2.py
+++ b/tests/integration/test_scenario_2.py
@@ -116,7 +116,7 @@ def test_blueprint_parameter_overrides(self, helper):
             f"Parameter override for spine.node_count failed: expected 6, found {len(spine_nodes)}"
         )
 
-        # Should use overridden name template "myspine-{node_num}"
+        # Should use overridden name template "myspine-{n}"
         for node_name in spine_nodes:
             assert "myspine-" in node_name, (
                 f"Parameter override for spine.name_template failed: {node_name} "
@@ -203,9 +203,9 @@ def test_traffic_demands_configuration(self, helper):
         helper.validate_traffic_demands(expected_count=4)
 
         # Same traffic demands as scenario 1
-        default_demands = helper.scenario.traffic_matrix_set.get_default_matrix()
+        default_demands = helper.scenario.demand_set.get_default_set()
         demands_dict = {
-            (demand.source, demand.sink): demand.demand for demand in default_demands
+            (demand.source, demand.target): demand.volume for demand in default_demands
         }
 
         expected_demands = {
diff --git a/tests/integration/test_scenario_4.py b/tests/integration/test_scenario_4.py
index cc896f3..d3a72f5 100644
--- a/tests/integration/test_scenario_4.py
+++ b/tests/integration/test_scenario_4.py
@@ -294,12 +294,12 @@ def test_traffic_matrix_configuration(self, helper):
         traffic_expectations = SCENARIO_4_TRAFFIC_EXPECTATIONS
 
         # Test default matrix
-        default_matrix = helper.scenario.traffic_matrix_set.matrices.get("default")
+        default_matrix = helper.scenario.demand_set.sets.get("default")
         assert default_matrix is not None, "Default traffic matrix should exist"
         assert len(default_matrix) == traffic_expectations["default_matrix"]
 
         # Test HPC workload matrix
-        hpc_matrix = helper.scenario.traffic_matrix_set.matrices.get("hpc_workload")
+        hpc_matrix = helper.scenario.demand_set.sets.get("hpc_workload")
         assert hpc_matrix is not None, "HPC workload matrix should exist"
         assert len(hpc_matrix) == traffic_expectations["hpc_workload_matrix"]
@@ -342,7 +342,6 @@ def test_advanced_workflow_steps(self, helper):
         #     exported["steps"]["build_graph"]["data"].get("graph")
         # )
         # assert graph is not None
-        # Skipping graph check - node_link_to_graph removed after NetGraph-Core migration
 
         # Test MaxFlow results - check baseline (no failure policy) or flow_results
         intra_dc = (
diff --git a/tests/integration/test_schema_modes.py b/tests/integration/test_schema_modes.py
index e9eb599..842b240 100644
--- a/tests/integration/test_schema_modes.py
+++ b/tests/integration/test_schema_modes.py
@@ -21,20 +21,21 @@ def test_schema_allows_modes_and_weight_by() -> None:
   links:
     - source: A
       target: B
-      link_params: {capacity: 10, cost: 2}
+      capacity: 10
+      cost: 2
 
-failure_policy_set:
+failures:
   p1:
     modes:
       - weight: 1.0
         rules:
-          - entity_scope: link
-            rule_type: choice
+          - scope: link
+            mode: choice
             count: 1
             weight_by: cost
 
 workflow:
-  - step_type: BuildGraph
+  - type: BuildGraph
     name: build
 """
     data = yaml.safe_load(yaml_doc)
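The schema_modes hunk above is the most compact view of the renamed scenario schema. For quick reference while reviewing the remaining files, the sketch below summarizes the old-to-new key mapping this patch applies; it is an illustrative aside, not part of the patch, and `OLD_TO_NEW` / `rename_keys` are hypothetical names. The mapping is compiled from the renames visible in these hunks and is not exhaustive; note that `link_params` is not renamed but flattened, with `capacity` and `cost` inlined into the link entry itself, which a pure key rename cannot express.

```python
# Hypothetical summary of the key renames applied throughout this diff.
# Illustration only: a naive recursive rename like this would also touch
# unrelated keys, so it is not a safe migration tool as written.
OLD_TO_NEW = {
    "sink": "target",                  # demands and MaxFlow steps
    "demand": "volume",                # demand magnitude
    "demand_placed": "volume_placed",
    "step_type": "type",               # workflow steps
    "entity_scope": "scope",           # failure rules
    "rule_type": "mode",               # failure rules
    "operator": "op",                  # selector conditions
    "use_blueprint": "blueprint",
    "parameters": "params",
    "node_count": "count",
    "name_template": "template",
    "groups": "nodes",                 # blueprint/network grouping
    "adjacency": "links",              # pattern-based links
    "traffic_matrix_set": "demands",
    "failure_policy_set": "failures",
    "fail_risk_groups": "expand_groups",
    "fail_risk_group_children": "expand_children",
}

def rename_keys(obj):
    """Recursively apply the old->new key mapping to a scenario dict."""
    if isinstance(obj, dict):
        return {OLD_TO_NEW.get(k, k): rename_keys(v) for k, v in obj.items()}
    if isinstance(obj, list):
        return [rename_keys(v) for v in obj]
    return obj
```

Name-template placeholders change in the same spirit (`{node_num}` becomes `{n}`), and structural changes such as merging plain nodes into the `nodes` mapping still need hand edits.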
diff --git a/tests/integration/test_template_examples.py b/tests/integration/test_template_examples.py
index 52bfa3a..049bc42 100644
--- a/tests/integration/test_template_examples.py
+++ b/tests/integration/test_template_examples.py
@@ -49,7 +49,7 @@ def test_star_network_template(self):
         for link in network_data["links"]:
             assert link["source"] == center
             assert link["target"] in leaves
-            assert link["link_params"]["capacity"] == 20.0
+            assert link["capacity"] == 20.0
 
     def test_mesh_network_template(self):
         """Test full mesh network template creates all-to-all connectivity."""
@@ -89,10 +89,8 @@ def test_tree_network_template(self):
 @pytest.mark.slow
 class TestBlueprintTemplates:
     def test_simple_group_blueprint_minimal(self):
-        blueprint = BlueprintTemplates.simple_group_blueprint(
-            "servers", 5, "srv-{node_num}"
-        )
-        assert blueprint["groups"]["servers"]["node_count"] == 5
+        blueprint = BlueprintTemplates.simple_group_blueprint("servers", 5, "srv-{n}")
+        assert blueprint["nodes"]["servers"]["count"] == 5
 
 
 @pytest.mark.slow
@@ -116,7 +114,7 @@ class TestWorkflowTemplates:
     def test_basic_build_workflow_minimal(self):
         workflow = WorkflowTemplates.basic_build_workflow()
         assert len(workflow) == 1
-        assert workflow[0]["step_type"] == "BuildGraph"
+        assert workflow[0]["type"] == "BuildGraph"
 
 
 @pytest.mark.slow
@@ -177,7 +175,7 @@ def test_combining_multiple_templates(self):
 
         # Add traffic demands
         demands = TrafficDemandTemplates.all_to_all_uniform(backbone_nodes, 10.0)
-        builder.builder.data["traffic_matrix_set"] = {"default": demands}
+        builder.builder.data["demands"] = {"default": demands}
 
         # Add failure policy
         policy = FailurePolicyTemplates.single_link_failure()
@@ -259,7 +257,7 @@ def test_template_parameter_validation(self):
 
         # Zero count should work
         blueprint_zero = BlueprintTemplates.two_tier_blueprint(tier1_count=0)
-        assert blueprint_zero["groups"]["tier1"]["node_count"] == 0
+        assert blueprint_zero["nodes"]["tier1"]["count"] == 0
 
         # Negative demands might be allowed in NetGraph - test actual behavior
         demands_negative = TrafficDemandTemplates.all_to_all_uniform(
@@ -268,7 +266,7 @@ def test_template_parameter_validation(self):
         # Should create demands but with negative values
         assert len(demands_negative) == 2  # A->B and B->A
         for demand in demands_negative:
-            assert demand["demand"] == -5.0
+            assert demand["volume"] == -5.0
 
     def test_template_consistency(self):
         """Test that templates produce consistent results."""
@@ -328,7 +326,8 @@ def test_scenario_1_template_variant(self):
                 {
                     "source": source,
                     "target": target,
-                    "link_params": {"capacity": capacity, "cost": 1},
+                    "capacity": capacity,
+                    "cost": 1,
                 }
             )
 
@@ -337,7 +336,8 @@ def test_scenario_1_template_variant(self):
             {
                 "source": "DEN",
                 "target": "DFW",
-                "link_params": {"capacity": 400.0, "cost": 1},
+                "capacity": 400.0,
+                "cost": 1,
             }
         )
 
@@ -345,12 +345,12 @@ def test_scenario_1_template_variant(self):
 
         # Add traffic demands matching scenario 1
         demands = [
-            {"source": "SEA", "sink": "JFK", "demand": 50},
-            {"source": "SFO", "sink": "DCA", "demand": 50},
-            {"source": "SEA", "sink": "DCA", "demand": 50},
-            {"source": "SFO", "sink": "JFK", "demand": 50},
+            {"source": "SEA", "target": "JFK", "volume": 50},
+            {"source": "SFO", "target": "DCA", "volume": 50},
+            {"source": "SEA", "target": "DCA", "volume": 50},
+            {"source": "SFO", "target": "JFK", "volume": 50},
         ]
-        builder.builder.data["traffic_matrix_set"] = {"default": demands}
+        builder.builder.data["demands"] = {"default": demands}
 
         # Add failure policy matching scenario 1
        policy = 
FailurePolicyTemplates.single_link_failure() @@ -389,33 +389,34 @@ def test_scenario_2_template_variant(self): tier1_count=4, tier2_count=4, pattern="mesh", link_capacity=100.0 ) # Rename groups to match scenario 2 - clos_2tier["groups"] = { - "leaf": clos_2tier["groups"]["tier1"], - "spine": clos_2tier["groups"]["tier2"], + clos_2tier["nodes"] = { + "leaf": clos_2tier["nodes"]["tier1"], + "spine": clos_2tier["nodes"]["tier2"], } - clos_2tier["adjacency"][0]["source"] = "/leaf" - clos_2tier["adjacency"][0]["target"] = "/spine" + clos_2tier["links"][0]["source"] = "/leaf" + clos_2tier["links"][0]["target"] = "/spine" builder.builder.with_blueprint("clos_2tier", clos_2tier) # Create city_cloud blueprint that uses clos_2tier city_cloud = { - "groups": { + "nodes": { "clos_instance": { - "use_blueprint": "clos_2tier", - "parameters": { - "spine.node_count": 6, - "spine.name_template": "myspine-{node_num}", + "blueprint": "clos_2tier", + "params": { + "spine.count": 6, + "spine.template": "myspine-{n}", }, }, - "edge_nodes": {"node_count": 4, "name_template": "edge-{node_num}"}, + "edge_nodes": {"count": 4, "template": "edge-{n}"}, }, - "adjacency": [ + "links": [ { "source": "/clos_instance/leaf", "target": "/edge_nodes", "pattern": "mesh", - "link_params": {"capacity": 100, "cost": 1000}, + "capacity": 100, + "cost": 1000, } ], } @@ -423,7 +424,7 @@ def test_scenario_2_template_variant(self): # Create single_node blueprint single_node = BlueprintTemplates.simple_group_blueprint( - "single", 1, "single-{node_num}" + "single", 1, "single-{n}" ) builder.builder.with_blueprint("single_node", single_node) @@ -431,67 +432,78 @@ def test_scenario_2_template_variant(self): network_data = { "name": "scenario_2_template", "version": "1.1", - "groups": { - "SEA": {"use_blueprint": "city_cloud"}, - "SFO": {"use_blueprint": "single_node"}, + "nodes": { + "SEA": {"blueprint": "city_cloud"}, + "SFO": {"blueprint": "single_node"}, + "DEN": {}, + "DFW": {}, + "JFK": {}, + "DCA": {}, }, - "nodes": {"DEN": {}, "DFW": {}, "JFK": {}, "DCA": {}}, "links": [ { "source": "DEN", "target": "DFW", - "link_params": {"capacity": 400, "cost": 7102}, + "capacity": 400, + "cost": 7102, }, { "source": "DEN", "target": "DFW", - "link_params": {"capacity": 400, "cost": 7102}, + "capacity": 400, + "cost": 7102, }, { "source": "DEN", "target": "JFK", - "link_params": {"capacity": 200, "cost": 7500}, + "capacity": 200, + "cost": 7500, }, { "source": "DFW", "target": "DCA", - "link_params": {"capacity": 200, "cost": 8000}, + "capacity": 200, + "cost": 8000, }, { "source": "DFW", "target": "JFK", - "link_params": {"capacity": 200, "cost": 9500}, + "capacity": 200, + "cost": 9500, }, { "source": "JFK", "target": "DCA", - "link_params": {"capacity": 100, "cost": 1714}, + "capacity": 100, + "cost": 1714, }, - ], - "adjacency": [ { "source": "/SFO", "target": "/DEN", "pattern": "mesh", - "link_params": {"capacity": 100, "cost": 7754}, + "capacity": 100, + "cost": 7754, }, { "source": "/SFO", "target": "/DFW", "pattern": "mesh", - "link_params": {"capacity": 200, "cost": 10000}, + "capacity": 200, + "cost": 10000, }, { "source": "/SEA/edge_nodes", "target": "/DEN", "pattern": "mesh", - "link_params": {"capacity": 100, "cost": 6846}, + "capacity": 100, + "cost": 6846, }, { "source": "/SEA/edge_nodes", "target": "/DFW", "pattern": "mesh", - "link_params": {"capacity": 100, "cost": 9600}, + "capacity": 100, + "cost": 9600, }, ], } @@ -499,12 +511,12 @@ def test_scenario_2_template_variant(self): # Add traffic and failure policy same as 
scenario 1 demands = [ - {"source": "SEA", "sink": "JFK", "demand": 50}, - {"source": "SFO", "sink": "DCA", "demand": 50}, - {"source": "SEA", "sink": "DCA", "demand": 50}, - {"source": "SFO", "sink": "JFK", "demand": 50}, + {"source": "SEA", "target": "JFK", "volume": 50}, + {"source": "SFO", "target": "DCA", "volume": 50}, + {"source": "SEA", "target": "DCA", "volume": 50}, + {"source": "SFO", "target": "JFK", "volume": 50}, ] - builder.builder.data["traffic_matrix_set"] = {"default": demands} + builder.builder.data["demands"] = {"default": demands} policy = FailurePolicyTemplates.single_link_failure() policy["attrs"]["name"] = "anySingleLink" @@ -534,16 +546,17 @@ def test_scenario_3_template_variant(self): # Create brick_2tier blueprint brick_2tier = { - "groups": { - "t1": {"node_count": 4, "name_template": "t1-{node_num}"}, - "t2": {"node_count": 4, "name_template": "t2-{node_num}"}, + "nodes": { + "t1": {"count": 4, "template": "t1-{n}"}, + "t2": {"count": 4, "template": "t2-{n}"}, }, - "adjacency": [ + "links": [ { "source": "/t1", "target": "/t2", "pattern": "mesh", - "link_params": {"capacity": 2, "cost": 1}, + "capacity": 2, + "cost": 1, } ], } @@ -551,23 +564,25 @@ def test_scenario_3_template_variant(self): # Create 3tier_clos blueprint three_tier_clos = { - "groups": { - "b1": {"use_blueprint": "brick_2tier"}, - "b2": {"use_blueprint": "brick_2tier"}, - "spine": {"node_count": 16, "name_template": "t3-{node_num}"}, + "nodes": { + "b1": {"blueprint": "brick_2tier"}, + "b2": {"blueprint": "brick_2tier"}, + "spine": {"count": 16, "template": "t3-{n}"}, }, - "adjacency": [ + "links": [ { "source": "b1/t2", "target": "spine", "pattern": "one_to_one", - "link_params": {"capacity": 2, "cost": 1}, + "capacity": 2, + "cost": 1, }, { "source": "b2/t2", "target": "spine", "pattern": "one_to_one", - "link_params": {"capacity": 2, "cost": 1}, + "capacity": 2, + "cost": 1, }, ], } @@ -577,16 +592,17 @@ def test_scenario_3_template_variant(self): network_data = { "name": "scenario_3_template", "version": "1.0", - "groups": { - "my_clos1": {"use_blueprint": "3tier_clos"}, - "my_clos2": {"use_blueprint": "3tier_clos"}, + "nodes": { + "my_clos1": {"blueprint": "3tier_clos"}, + "my_clos2": {"blueprint": "3tier_clos"}, }, - "adjacency": [ + "links": [ { "source": "my_clos1/spine", "target": "my_clos2/spine", "pattern": "one_to_one", - "link_params": {"capacity": 2, "cost": 1}, + "capacity": 2, + "cost": 1, } ], } @@ -594,12 +610,12 @@ def test_scenario_3_template_variant(self): # Add capacity probe workflow workflow = [ - {"step_type": "BuildGraph", "name": "build_graph"}, + {"type": "BuildGraph", "name": "build_graph"}, { - "step_type": "MaxFlow", + "type": "MaxFlow", "name": "capacity_analysis", "source": "my_clos1/b.*/t1", - "sink": "my_clos2/b.*/t1", + "target": "my_clos2/b.*/t1", "mode": "combine", "shortest_path": True, "flow_placement": "PROPORTIONAL", @@ -607,10 +623,10 @@ def test_scenario_3_template_variant(self): "failure_policy": None, }, { - "step_type": "MaxFlow", + "type": "MaxFlow", "name": "capacity_analysis2", "source": "my_clos1/b.*/t1", - "sink": "my_clos2/b.*/t1", + "target": "my_clos2/b.*/t1", "mode": "combine", "shortest_path": True, "flow_placement": "EQUAL_BALANCED", diff --git a/tests/model/demand/test_builder.py b/tests/model/demand/test_builder.py index 7acd792..bce4dc2 100644 --- a/tests/model/demand/test_builder.py +++ b/tests/model/demand/test_builder.py @@ -3,223 +3,221 @@ import pytest from ngraph.model.demand.builder import ( - _coerce_flow_policy_config, - 
build_traffic_matrix_set, + _coerce_flow_policy, + build_demand_set, ) from ngraph.model.flow.policy_config import FlowPolicyPreset -def test_build_traffic_matrix_set_basic(): +def test_build_demand_set_basic(): """Test building a basic traffic matrix set.""" raw = { "tm1": [ { "source": "A", - "sink": "B", - "demand": 100.0, + "target": "B", + "volume": 100.0, } ] } - tms = build_traffic_matrix_set(raw) - assert "tm1" in tms.matrices - demands = tms.get_matrix("tm1") + tms = build_demand_set(raw) + assert "tm1" in tms.sets + demands = tms.get_set("tm1") assert len(demands) == 1 assert demands[0].source == "A" - assert demands[0].sink == "B" - assert demands[0].demand == 100.0 + assert demands[0].target == "B" + assert demands[0].volume == 100.0 -def test_build_traffic_matrix_set_multiple_matrices(): +def test_build_demand_set_multiple_matrices(): """Test building multiple traffic matrices.""" raw = { - "tm1": [{"source": "A", "sink": "B", "demand": 100.0}], - "tm2": [{"source": "C", "sink": "D", "demand": 200.0}], + "tm1": [{"source": "A", "target": "B", "volume": 100.0}], + "tm2": [{"source": "C", "target": "D", "volume": 200.0}], } - tms = build_traffic_matrix_set(raw) - assert "tm1" in tms.matrices - assert "tm2" in tms.matrices - assert len(tms.get_matrix("tm1")) == 1 - assert len(tms.get_matrix("tm2")) == 1 + tms = build_demand_set(raw) + assert "tm1" in tms.sets + assert "tm2" in tms.sets + assert len(tms.get_set("tm1")) == 1 + assert len(tms.get_set("tm2")) == 1 -def test_build_traffic_matrix_set_multiple_demands(): +def test_build_demand_set_multiple_demands(): """Test building traffic matrix with multiple demands.""" raw = { "tm1": [ - {"source": "A", "sink": "B", "demand": 100.0}, - {"source": "C", "sink": "D", "demand": 200.0}, + {"source": "A", "target": "B", "volume": 100.0}, + {"source": "C", "target": "D", "volume": 200.0}, ] } - tms = build_traffic_matrix_set(raw) - demands = tms.get_matrix("tm1") + tms = build_demand_set(raw) + demands = tms.get_set("tm1") assert len(demands) == 2 - assert demands[0].demand == 100.0 - assert demands[1].demand == 200.0 + assert demands[0].volume == 100.0 + assert demands[1].volume == 200.0 -def test_build_traffic_matrix_set_with_flow_policy_enum(): +def test_build_demand_set_with_flow_policy_enum(): """Test building with FlowPolicyPreset enum.""" raw = { "tm1": [ { "source": "A", - "sink": "B", - "demand": 100.0, - "flow_policy_config": FlowPolicyPreset.SHORTEST_PATHS_ECMP, + "target": "B", + "volume": 100.0, + "flow_policy": FlowPolicyPreset.SHORTEST_PATHS_ECMP, } ] } - tms = build_traffic_matrix_set(raw) - demands = tms.get_matrix("tm1") - assert demands[0].flow_policy_config == FlowPolicyPreset.SHORTEST_PATHS_ECMP + tms = build_demand_set(raw) + demands = tms.get_set("tm1") + assert demands[0].flow_policy == FlowPolicyPreset.SHORTEST_PATHS_ECMP -def test_build_traffic_matrix_set_with_flow_policy_string(): +def test_build_demand_set_with_flow_policy_string(): """Test building with FlowPolicyPreset as string.""" raw = { "tm1": [ { "source": "A", - "sink": "B", - "demand": 100.0, - "flow_policy_config": "SHORTEST_PATHS_ECMP", + "target": "B", + "volume": 100.0, + "flow_policy": "SHORTEST_PATHS_ECMP", } ] } - tms = build_traffic_matrix_set(raw) - demands = tms.get_matrix("tm1") - assert demands[0].flow_policy_config == FlowPolicyPreset.SHORTEST_PATHS_ECMP + tms = build_demand_set(raw) + demands = tms.get_set("tm1") + assert demands[0].flow_policy == FlowPolicyPreset.SHORTEST_PATHS_ECMP -def 
test_build_traffic_matrix_set_with_flow_policy_int(): +def test_build_demand_set_with_flow_policy_int(): """Test building with FlowPolicyPreset as integer.""" raw = { "tm1": [ { "source": "A", - "sink": "B", - "demand": 100.0, - "flow_policy_config": 1, + "target": "B", + "volume": 100.0, + "flow_policy": 1, } ] } - tms = build_traffic_matrix_set(raw) - demands = tms.get_matrix("tm1") - assert demands[0].flow_policy_config == FlowPolicyPreset.SHORTEST_PATHS_ECMP + tms = build_demand_set(raw) + demands = tms.get_set("tm1") + assert demands[0].flow_policy == FlowPolicyPreset.SHORTEST_PATHS_ECMP -def test_build_traffic_matrix_set_invalid_raw_type(): +def test_build_demand_set_invalid_raw_type(): """Test error handling for invalid raw type.""" with pytest.raises(ValueError, match="must be a mapping"): - build_traffic_matrix_set("not a dict") + build_demand_set("not a dict") with pytest.raises(ValueError, match="must be a mapping"): - build_traffic_matrix_set([]) + build_demand_set([]) -def test_build_traffic_matrix_set_invalid_matrix_value(): +def test_build_demand_set_invalid_matrix_value(): """Test error handling when matrix value is not a list.""" raw = {"tm1": "not a list"} with pytest.raises(ValueError, match="must map to a list"): - build_traffic_matrix_set(raw) + build_demand_set(raw) -def test_build_traffic_matrix_set_invalid_demand_type(): +def test_build_demand_set_invalid_demand_type(): """Test error handling when demand entry is not a dict.""" raw = {"tm1": ["not a dict"]} with pytest.raises(ValueError, match="must be dicts"): - build_traffic_matrix_set(raw) + build_demand_set(raw) -def test_coerce_flow_policy_config_none(): +def test_coerce_flow_policy_none(): """Test coercing None.""" - assert _coerce_flow_policy_config(None) is None + assert _coerce_flow_policy(None) is None -def test_coerce_flow_policy_config_enum(): +def test_coerce_flow_policy_enum(): """Test coercing FlowPolicyPreset enum.""" preset = FlowPolicyPreset.SHORTEST_PATHS_ECMP - assert _coerce_flow_policy_config(preset) == preset + assert _coerce_flow_policy(preset) == preset -def test_coerce_flow_policy_config_int(): +def test_coerce_flow_policy_int(): """Test coercing integer to enum.""" - assert _coerce_flow_policy_config(1) == FlowPolicyPreset.SHORTEST_PATHS_ECMP - assert _coerce_flow_policy_config(2) == FlowPolicyPreset.SHORTEST_PATHS_WCMP - assert _coerce_flow_policy_config(3) == FlowPolicyPreset.TE_WCMP_UNLIM - assert _coerce_flow_policy_config(4) == FlowPolicyPreset.TE_ECMP_UP_TO_256_LSP - assert _coerce_flow_policy_config(5) == FlowPolicyPreset.TE_ECMP_16_LSP + assert _coerce_flow_policy(1) == FlowPolicyPreset.SHORTEST_PATHS_ECMP + assert _coerce_flow_policy(2) == FlowPolicyPreset.SHORTEST_PATHS_WCMP + assert _coerce_flow_policy(3) == FlowPolicyPreset.TE_WCMP_UNLIM + assert _coerce_flow_policy(4) == FlowPolicyPreset.TE_ECMP_UP_TO_256_LSP + assert _coerce_flow_policy(5) == FlowPolicyPreset.TE_ECMP_16_LSP -def test_coerce_flow_policy_config_string(): +def test_coerce_flow_policy_string(): """Test coercing string to enum.""" assert ( - _coerce_flow_policy_config("SHORTEST_PATHS_ECMP") + _coerce_flow_policy("SHORTEST_PATHS_ECMP") == FlowPolicyPreset.SHORTEST_PATHS_ECMP ) assert ( - _coerce_flow_policy_config("shortest_paths_ecmp") + _coerce_flow_policy("shortest_paths_ecmp") == FlowPolicyPreset.SHORTEST_PATHS_ECMP ) assert ( - _coerce_flow_policy_config("SHORTEST_PATHS_WCMP") + _coerce_flow_policy("SHORTEST_PATHS_WCMP") == FlowPolicyPreset.SHORTEST_PATHS_WCMP ) - assert 
_coerce_flow_policy_config("TE_WCMP_UNLIM") == FlowPolicyPreset.TE_WCMP_UNLIM + assert _coerce_flow_policy("TE_WCMP_UNLIM") == FlowPolicyPreset.TE_WCMP_UNLIM assert ( - _coerce_flow_policy_config("TE_ECMP_UP_TO_256_LSP") + _coerce_flow_policy("TE_ECMP_UP_TO_256_LSP") == FlowPolicyPreset.TE_ECMP_UP_TO_256_LSP ) - assert ( - _coerce_flow_policy_config("TE_ECMP_16_LSP") == FlowPolicyPreset.TE_ECMP_16_LSP - ) + assert _coerce_flow_policy("TE_ECMP_16_LSP") == FlowPolicyPreset.TE_ECMP_16_LSP -def test_coerce_flow_policy_config_string_numeric(): +def test_coerce_flow_policy_string_numeric(): """Test coercing numeric string to enum.""" - assert _coerce_flow_policy_config("1") == FlowPolicyPreset.SHORTEST_PATHS_ECMP - assert _coerce_flow_policy_config("2") == FlowPolicyPreset.SHORTEST_PATHS_WCMP - assert _coerce_flow_policy_config("3") == FlowPolicyPreset.TE_WCMP_UNLIM + assert _coerce_flow_policy("1") == FlowPolicyPreset.SHORTEST_PATHS_ECMP + assert _coerce_flow_policy("2") == FlowPolicyPreset.SHORTEST_PATHS_WCMP + assert _coerce_flow_policy("3") == FlowPolicyPreset.TE_WCMP_UNLIM -def test_coerce_flow_policy_config_empty_string(): +def test_coerce_flow_policy_empty_string(): """Test coercing empty string.""" - assert _coerce_flow_policy_config("") is None - assert _coerce_flow_policy_config(" ") is None + assert _coerce_flow_policy("") is None + assert _coerce_flow_policy(" ") is None -def test_coerce_flow_policy_config_invalid_string(): +def test_coerce_flow_policy_invalid_string(): """Test error handling for invalid string.""" - with pytest.raises(ValueError, match="Unknown flow policy config"): - _coerce_flow_policy_config("INVALID_POLICY") + with pytest.raises(ValueError, match="Unknown flow policy"): + _coerce_flow_policy("INVALID_POLICY") -def test_coerce_flow_policy_config_invalid_numeric_string(): +def test_coerce_flow_policy_invalid_numeric_string(): """Test error handling for invalid numeric string.""" - with pytest.raises(ValueError, match="Unknown flow policy config value"): - _coerce_flow_policy_config("999") + with pytest.raises(ValueError, match="Unknown flow policy value"): + _coerce_flow_policy("999") -def test_coerce_flow_policy_config_invalid_int(): +def test_coerce_flow_policy_invalid_int(): """Test error handling for invalid integer.""" - with pytest.raises(ValueError, match="Unknown flow policy config value"): - _coerce_flow_policy_config(999) + with pytest.raises(ValueError, match="Unknown flow policy value"): + _coerce_flow_policy(999) -def test_coerce_flow_policy_config_other_types(): +def test_coerce_flow_policy_other_types(): """Test that other types are passed through unchanged.""" # Dict config for advanced usage dict_config = {"custom": "config"} - assert _coerce_flow_policy_config(dict_config) == dict_config + assert _coerce_flow_policy(dict_config) == dict_config # List (unusual but should pass through) list_config = ["a", "b"] - assert _coerce_flow_policy_config(list_config) == list_config + assert _coerce_flow_policy(list_config) == list_config diff --git a/tests/model/demand/test_spec.py b/tests/model/demand/test_spec.py index fa221ed..5324264 100644 --- a/tests/model/demand/test_spec.py +++ b/tests/model/demand/test_spec.py @@ -4,12 +4,12 @@ def test_defaults_and_id_generation() -> None: """TrafficDemand sets sane defaults and generates a unique, structured id.""" - demand = TrafficDemand(source="Src", sink="Dst") + demand = TrafficDemand(source="Src", target="Dst") # Defaults assert demand.priority == 0 - assert demand.demand == 0.0 - assert 
demand.demand_placed == 0.0 + assert demand.volume == 0.0 + assert demand.volume_placed == 0.0 assert demand.mode == "combine" assert demand.attrs == {} @@ -20,7 +20,7 @@ def test_defaults_and_id_generation() -> None: assert len(parts) == 3 assert all(parts) - demand2 = TrafficDemand(source="Src", sink="Dst") + demand2 = TrafficDemand(source="Src", target="Dst") assert demand2.id != demand.id @@ -29,23 +29,23 @@ def test_explicit_id_preserved() -> None: demand = TrafficDemand( id="my-explicit-id", source="Src", - sink="Dst", - demand=100.0, + target="Dst", + volume=100.0, ) assert demand.id == "my-explicit-id" def test_explicit_id_round_trip() -> None: """TrafficDemand ID survives serialization to dict and reconstruction.""" - original = TrafficDemand(source="A", sink="B", demand=50.0) + original = TrafficDemand(source="A", target="B", volume=50.0) original_id = original.id # Simulate serialization (as done in workflow steps) config = { "id": original.id, "source": original.source, - "sink": original.sink, - "demand": original.demand, + "target": original.target, + "volume": original.volume, "mode": original.mode, "priority": original.priority, } @@ -54,8 +54,8 @@ def test_explicit_id_round_trip() -> None: reconstructed = TrafficDemand( id=config.get("id"), source=config["source"], - sink=config["sink"], - demand=config["demand"], + target=config["target"], + volume=config["volume"], mode=config.get("mode", "pairwise"), priority=config.get("priority", 0), ) @@ -65,8 +65,8 @@ def test_explicit_id_round_trip() -> None: def test_attrs_isolation_between_instances() -> None: """Each instance gets its own attrs dict; mutating one does not affect others.""" - d1 = TrafficDemand(source="A", sink="B") - d2 = TrafficDemand(source="A", sink="B") + d1 = TrafficDemand(source="A", target="B") + d2 = TrafficDemand(source="A", target="B") d1.attrs["k"] = "v" assert d1.attrs == {"k": "v"} @@ -77,20 +77,20 @@ def test_custom_assignment_including_policy_config() -> None: """Custom field values are preserved, including mode and policy config.""" demand = TrafficDemand( source="SourceNode", - sink="TargetNode", + target="TargetNode", priority=5, - demand=42.5, - demand_placed=10.0, + volume=42.5, + volume_placed=10.0, attrs={"description": "test"}, mode="pairwise", - flow_policy_config=FlowPolicyConfig.SHORTEST_PATHS_ECMP, + flow_policy=FlowPolicyConfig.SHORTEST_PATHS_ECMP, ) assert demand.source == "SourceNode" - assert demand.sink == "TargetNode" + assert demand.target == "TargetNode" assert demand.priority == 5 - assert demand.demand == 42.5 - assert demand.demand_placed == 10.0 + assert demand.volume == 42.5 + assert demand.volume_placed == 10.0 assert demand.attrs == {"description": "test"} assert demand.mode == "pairwise" - assert demand.flow_policy_config == FlowPolicyConfig.SHORTEST_PATHS_ECMP + assert demand.flow_policy == FlowPolicyConfig.SHORTEST_PATHS_ECMP diff --git a/tests/model/failure/test_conditions_unit.py b/tests/model/failure/test_conditions_unit.py index 9393a9a..1c4061f 100644 --- a/tests/model/failure/test_conditions_unit.py +++ b/tests/model/failure/test_conditions_unit.py @@ -4,80 +4,64 @@ from ngraph.dsl.selectors import Condition, evaluate_condition, evaluate_conditions -# Use Condition directly (FailureCondition is just an alias) -FailureCondition = Condition - class TestEvaluateCondition: def test_equality_and_inequality(self) -> None: attrs = {"x": 5, "y": "abc"} - assert evaluate_condition(attrs, FailureCondition("x", "==", 5)) is True - assert evaluate_condition(attrs, 
FailureCondition("x", "!=", 6)) is True - assert evaluate_condition(attrs, FailureCondition("y", "==", "abc")) is True + assert evaluate_condition(attrs, Condition("x", "==", 5)) is True + assert evaluate_condition(attrs, Condition("x", "!=", 6)) is True + assert evaluate_condition(attrs, Condition("y", "==", "abc")) is True def test_ordering_with_none_guard(self) -> None: attrs = {"a": 3, "b": None} - assert evaluate_condition(attrs, FailureCondition("a", ">", 2)) is True - assert evaluate_condition(attrs, FailureCondition("a", ">=", 3)) is True - assert evaluate_condition(attrs, FailureCondition("a", "<", 10)) is True - assert evaluate_condition(attrs, FailureCondition("a", "<=", 3)) is True + assert evaluate_condition(attrs, Condition("a", ">", 2)) is True + assert evaluate_condition(attrs, Condition("a", ">=", 3)) is True + assert evaluate_condition(attrs, Condition("a", "<", 10)) is True + assert evaluate_condition(attrs, Condition("a", "<=", 3)) is True # None comparisons must return False rather than raising - assert evaluate_condition(attrs, FailureCondition("b", ">", 0)) is False - assert evaluate_condition(attrs, FailureCondition("missing", "<", 0)) is False + assert evaluate_condition(attrs, Condition("b", ">", 0)) is False + assert evaluate_condition(attrs, Condition("missing", "<", 0)) is False def test_contains_and_not_contains(self) -> None: attrs = {"s": "hello", "l": [1, 2, 3], "n": None, "i": 123} - assert ( - evaluate_condition(attrs, FailureCondition("s", "contains", "ell")) is True - ) - assert evaluate_condition(attrs, FailureCondition("l", "contains", 2)) is True - assert ( - evaluate_condition(attrs, FailureCondition("s", "not_contains", "xyz")) - is True - ) + assert evaluate_condition(attrs, Condition("s", "contains", "ell")) is True + assert evaluate_condition(attrs, Condition("l", "contains", 2)) is True + assert evaluate_condition(attrs, Condition("s", "not_contains", "xyz")) is True # None yields False for both contains and not_contains (can't evaluate on None) - assert evaluate_condition(attrs, FailureCondition("n", "contains", 1)) is False - assert ( - evaluate_condition(attrs, FailureCondition("n", "not_contains", 1)) is False - ) + assert evaluate_condition(attrs, Condition("n", "contains", 1)) is False + assert evaluate_condition(attrs, Condition("n", "not_contains", 1)) is False # Non-iterable: contains returns False, not_contains returns True - assert evaluate_condition(attrs, FailureCondition("i", "contains", 1)) is False - assert ( - evaluate_condition(attrs, FailureCondition("i", "not_contains", 1)) is True - ) + assert evaluate_condition(attrs, Condition("i", "contains", 1)) is False + assert evaluate_condition(attrs, Condition("i", "not_contains", 1)) is True - def test_any_value_and_no_value(self) -> None: + def test_exists_and_not_exists(self) -> None: attrs: dict[str, Any] = {"p": 0, "q": None} - assert evaluate_condition(attrs, FailureCondition("p", "any_value")) is True - # any_value with None returns False (attr must have non-None value) - assert evaluate_condition(attrs, FailureCondition("q", "any_value")) is False - assert ( - evaluate_condition(attrs, FailureCondition("missing", "any_value")) is False - ) - assert ( - evaluate_condition(attrs, FailureCondition("missing", "no_value")) is True - ) - assert evaluate_condition(attrs, FailureCondition("q", "no_value")) is True - assert evaluate_condition(attrs, FailureCondition("p", "no_value")) is False + assert evaluate_condition(attrs, Condition("p", "exists")) is True + # exists with None 
returns False (attr must have non-None value) + assert evaluate_condition(attrs, Condition("q", "exists")) is False + assert evaluate_condition(attrs, Condition("missing", "exists")) is False + assert evaluate_condition(attrs, Condition("missing", "not_exists")) is True + assert evaluate_condition(attrs, Condition("q", "not_exists")) is True + assert evaluate_condition(attrs, Condition("p", "not_exists")) is False def test_unsupported_operator_raises(self) -> None: with pytest.raises(ValueError, match="Invalid operator"): - FailureCondition("x", "bad") # type: ignore + Condition("x", "bad") # type: ignore class TestEvaluateConditions: def test_and_or_logic(self) -> None: attrs = {"x": 10, "y": "abc"} - conds = [FailureCondition("x", ">", 5), FailureCondition("y", "==", "abc")] + conds = [Condition("x", ">", 5), Condition("y", "==", "abc")] assert evaluate_conditions(attrs, conds, "and") is True assert evaluate_conditions(attrs, conds, "or") is True - conds2 = [FailureCondition("x", "<", 5), FailureCondition("y", "!=", "abc")] + conds2 = [Condition("x", "<", 5), Condition("y", "!=", "abc")] assert evaluate_conditions(attrs, conds2, "and") is False assert evaluate_conditions(attrs, conds2, "or") is False def test_unsupported_logic(self) -> None: # Need non-empty conditions to trigger logic check - conds = [FailureCondition("x", "==", 1)] + conds = [Condition("x", "==", 1)] with pytest.raises(ValueError, match="Unsupported logic"): evaluate_conditions({}, conds, "xor") diff --git a/tests/model/failure/test_failure_trace.py b/tests/model/failure/test_failure_trace.py index 6d29b74..db7d7d0 100644 --- a/tests/model/failure/test_failure_trace.py +++ b/tests/model/failure/test_failure_trace.py @@ -3,8 +3,8 @@ import pytest from ngraph.analysis.failure_manager import FailureManager +from ngraph.dsl.selectors.schema import Condition from ngraph.model.failure.policy import ( - FailureCondition, FailureMode, FailurePolicy, FailureRule, @@ -22,7 +22,7 @@ class TestFailureTracePolicyLevel: def test_trace_captures_mode_index(self) -> None: """Test that mode_index is correctly captured.""" - rule = FailureRule(entity_scope="node", rule_type="all") + rule = FailureRule(scope="node", mode="all") policy = FailurePolicy( modes=[ FailureMode(weight=0.0, rules=[]), # weight=0 never selected @@ -40,7 +40,7 @@ def test_trace_captures_mode_index(self) -> None: def test_trace_captures_mode_attrs(self) -> None: """Test that mode_attrs is a copy of the selected mode's attrs.""" attrs = {"severity": "high", "region": "west"} - rule = FailureRule(entity_scope="node", rule_type="all") + rule = FailureRule(scope="node", mode="all") policy = FailurePolicy( modes=[FailureMode(weight=1.0, rules=[rule], attrs=attrs)] ) @@ -55,9 +55,9 @@ def test_trace_captures_mode_attrs(self) -> None: def test_trace_captures_selection_fields(self) -> None: """Test that selections contain correct fields.""" rule = FailureRule( - entity_scope="node", - conditions=[FailureCondition(attr="type", operator="==", value="router")], - rule_type="choice", + scope="node", + conditions=[Condition(attr="type", op="==", value="router")], + mode="choice", count=1, ) policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])]) @@ -73,19 +73,17 @@ def test_trace_captures_selection_fields(self) -> None: assert len(trace["selections"]) == 1 sel = trace["selections"][0] assert sel["rule_index"] == 0 - assert sel["entity_scope"] == "node" - assert sel["rule_type"] == "choice" + assert sel["scope"] == "node" + assert sel["mode"] == "choice" assert 
sel["matched_count"] == 2 # N1 and N2 matched assert len(sel["selected_ids"]) == 1 # count=1 def test_trace_empty_selections_when_no_match(self) -> None: """Test that rules matching nothing are not recorded.""" rule = FailureRule( - entity_scope="node", - conditions=[ - FailureCondition(attr="type", operator="==", value="nonexistent") - ], - rule_type="all", + scope="node", + conditions=[Condition(attr="type", op="==", value="nonexistent")], + mode="all", ) policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])]) @@ -112,12 +110,10 @@ def test_trace_captures_expansion_nodes_links(self) -> None: modes=[ FailureMode( weight=1.0, - rules=[ - FailureRule(entity_scope="node", rule_type="choice", count=1) - ], + rules=[FailureRule(scope="node", mode="choice", count=1)], ) ], - fail_risk_groups=True, + expand_groups=True, ) policy_choice.apply_failures(nodes, links, failure_trace=trace, seed=42) @@ -130,15 +126,13 @@ def test_trace_captures_expansion_risk_groups(self) -> None: """Test expansion tracking for risk group children.""" # Select only the parent, then expansion should add child rule = FailureRule( - entity_scope="risk_group", - conditions=[ - FailureCondition(attr="name", operator="==", value="parent_rg") - ], - rule_type="all", + scope="risk_group", + conditions=[Condition(attr="name", op="==", value="parent_rg")], + mode="all", ) policy = FailurePolicy( modes=[FailureMode(weight=1.0, rules=[rule])], - fail_risk_group_children=True, + expand_children=True, ) risk_groups = { @@ -165,7 +159,7 @@ def test_trace_no_modes_returns_null_mode_index(self) -> None: def test_trace_none_does_not_populate(self) -> None: """Test that passing failure_trace=None doesn't cause errors.""" - rule = FailureRule(entity_scope="node", rule_type="all") + rule = FailureRule(scope="node", mode="all") policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])]) # Should not raise @@ -174,7 +168,7 @@ def test_trace_none_does_not_populate(self) -> None: def test_trace_deterministic_with_seed(self) -> None: """Test that trace is deterministic with fixed seed.""" - rule = FailureRule(entity_scope="node", rule_type="choice", count=1) + rule = FailureRule(scope="node", mode="choice", count=1) policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])]) nodes = {"N1": {}, "N2": {}, "N3": {}} @@ -229,7 +223,7 @@ class TestFailureTraceManagerIntegration: def test_results_include_trace_fields(self, simple_network: Network) -> None: """Test that results include trace fields when store_failure_patterns=True.""" - rule = FailureRule(entity_scope="node", rule_type="choice", count=1) + rule = FailureRule(scope="node", mode="choice", count=1) policy = FailurePolicy( modes=[FailureMode(weight=1.0, rules=[rule], attrs={"test": "attr"})] ) @@ -259,7 +253,7 @@ def mock_analysis(network, excluded_nodes, excluded_links, **kwargs): def test_baseline_has_no_trace_fields(self, simple_network: Network) -> None: """Test that baseline result doesn't have trace fields.""" - rule = FailureRule(entity_scope="node", rule_type="choice", count=1) + rule = FailureRule(scope="node", mode="choice", count=1) policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])]) policy_set = FailurePolicySet() policy_set.policies["test"] = policy @@ -291,9 +285,9 @@ def test_deduplication_produces_unique_patterns( """Test that deduplicated iterations produce single unique result.""" # Use a deterministic policy that always produces same result rule = FailureRule( - entity_scope="node", - 
conditions=[FailureCondition(attr="type", operator="==", value="router")],
-            rule_type="all",  # Always selects same nodes
+            scope="node",
+            conditions=[Condition(attr="type", op="==", value="router")],
+            mode="all",  # Always selects same nodes
         )
         policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])])
         policy_set = FailurePolicySet()
@@ -321,7 +315,7 @@ def mock_analysis(network, excluded_nodes, excluded_links, **kwargs):

     def test_trace_deterministic_across_runs(self, simple_network: Network) -> None:
         """Test that trace is deterministic with fixed seed across runs."""
-        rule = FailureRule(entity_scope="node", rule_type="choice", count=1)
+        rule = FailureRule(scope="node", mode="choice", count=1)
         policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])])
         policy_set = FailurePolicySet()
         policy_set.policies["test"] = policy
@@ -357,7 +351,7 @@ def test_no_trace_when_store_failure_patterns_false(
         self, simple_network: Network
     ) -> None:
         """Test that trace is not captured when store_failure_patterns=False."""
-        rule = FailureRule(entity_scope="node", rule_type="choice", count=1)
+        rule = FailureRule(scope="node", mode="choice", count=1)
         policy = FailurePolicy(modes=[FailureMode(weight=1.0, rules=[rule])])
         policy_set = FailurePolicySet()
         policy_set.policies["test"] = policy
diff --git a/tests/model/failure/test_policy.py b/tests/model/failure/test_policy.py
index 18d7f94..9216e3b 100644
--- a/tests/model/failure/test_policy.py
+++ b/tests/model/failure/test_policy.py
@@ -2,8 +2,8 @@

 import pytest

+from ngraph.dsl.selectors.schema import Condition
 from ngraph.model.failure.policy import (
-    FailureCondition,
     FailurePolicy,
     FailureRule,
 )
@@ -20,20 +20,20 @@ def test_failure_rule_invalid_probability():
     # Test probability > 1.0
     with pytest.raises(ValueError, match="probability=1.5 must be within \\[0,1\\]"):
         FailureRule(
-            entity_scope="node",
-            conditions=[FailureCondition(attr="type", operator="==", value="router")],
+            scope="node",
+            conditions=[Condition(attr="type", op="==", value="router")],
             logic="and",
-            rule_type="random",
+            mode="random",
             probability=1.5,
         )

     # Test probability < 0.0
     with pytest.raises(ValueError, match="probability=-0.1 must be within \\[0,1\\]"):
         FailureRule(
-            entity_scope="node",
-            conditions=[FailureCondition(attr="type", operator="==", value="router")],
+            scope="node",
+            conditions=[Condition(attr="type", op="==", value="router")],
             logic="and",
-            rule_type="random",
+            mode="random",
             probability=-0.1,
         )
@@ -43,8 +43,8 @@ def test_failure_policy_evaluate_conditions_or_logic():
     from ngraph.dsl.selectors import evaluate_conditions

     conditions = [
-        FailureCondition(attr="vendor", operator="==", value="cisco"),
-        FailureCondition(attr="location", operator="==", value="dallas"),
+        Condition(attr="vendor", op="==", value="cisco"),
+        Condition(attr="location", op="==", value="dallas"),
     ]

     # Should pass if either condition is true
@@ -65,7 +65,7 @@ def test_failure_policy_evaluate_conditions_invalid_logic():
     """Test condition evaluation with invalid logic via shared evaluate_conditions."""
     from ngraph.dsl.selectors import evaluate_conditions

-    conditions = [FailureCondition(attr="vendor", operator="==", value="cisco")]
+    conditions = [Condition(attr="vendor", op="==", value="cisco")]
     attrs = {"vendor": "cisco"}

     with pytest.raises(ValueError, match="Unsupported logic: invalid"):
@@ -73,14 +73,12 @@


 def test_node_scope_all():
-    """Rule with entity_scope='node' and rule_type='all' => fails all matched nodes."""
+    """Rule with scope='node' and mode='all' => fails all matched nodes."""
     rule = FailureRule(
-        entity_scope="node",
-        conditions=[
-            FailureCondition(attr="equipment_vendor", operator="==", value="cisco")
-        ],
+        scope="node",
+        conditions=[Condition(attr="equipment_vendor", op="==", value="cisco")],
         logic="and",
-        rule_type="all",
+        mode="all",
     )
     policy = _single_mode_policy(rule)
@@ -99,14 +97,12 @@ def test_node_scope_random():
-    """Rule with entity_scope='node' and rule_type='random' => random node failure."""
+    """Rule with scope='node' and mode='random' => random node failure."""
     rule = FailureRule(
-        entity_scope="node",
-        conditions=[
-            FailureCondition(attr="equipment_vendor", operator="==", value="cisco")
-        ],
+        scope="node",
+        conditions=[Condition(attr="equipment_vendor", op="==", value="cisco")],
         logic="and",
-        rule_type="random",
+        mode="random",
         probability=0.5,
     )
     policy = _single_mode_policy(rule)
@@ -129,14 +125,12 @@ def test_node_scope_choice():
-    """Rule with entity_scope='node' and rule_type='choice' => limited node failures."""
+    """Rule with scope='node' and mode='choice' => limited node failures."""
     rule = FailureRule(
-        entity_scope="node",
-        conditions=[
-            FailureCondition(attr="equipment_vendor", operator="==", value="cisco")
-        ],
+        scope="node",
+        conditions=[Condition(attr="equipment_vendor", op="==", value="cisco")],
         logic="and",
-        rule_type="choice",
+        mode="choice",
         count=1,
     )
     policy = _single_mode_policy(rule)
@@ -156,12 +150,12 @@ def test_link_scope_all():
-    """Rule with entity_scope='link' and rule_type='all' => fails all matched links."""
+    """Rule with scope='link' and mode='all' => fails all matched links."""
     rule = FailureRule(
-        entity_scope="link",
-        conditions=[FailureCondition(attr="link_type", operator="==", value="fiber")],
+        scope="link",
+        conditions=[Condition(attr="link_type", op="==", value="fiber")],
         logic="and",
-        rule_type="all",
+        mode="all",
     )
     policy = _single_mode_policy(rule)
@@ -177,12 +171,12 @@ def test_link_scope_random():
-    """Rule with entity_scope='link' and rule_type='random' => random link failure."""
+    """Rule with scope='link' and mode='random' => random link failure."""
     rule = FailureRule(
-        entity_scope="link",
-        conditions=[FailureCondition(attr="link_type", operator="==", value="fiber")],
+        scope="link",
+        conditions=[Condition(attr="link_type", op="==", value="fiber")],
         logic="and",
-        rule_type="random",
+        mode="random",
         probability=0.4,
     )
     policy = _single_mode_policy(rule)
@@ -205,12 +199,12 @@ def test_link_scope_choice():
-    """Rule with entity_scope='link' and rule_type='choice' => limited link failures."""
+    """Rule with scope='link' and mode='choice' => limited link failures."""
     rule = FailureRule(
-        entity_scope="link",
-        conditions=[FailureCondition(attr="link_type", operator="==", value="fiber")],
+        scope="link",
+        conditions=[Condition(attr="link_type", op="==", value="fiber")],
         logic="and",
-        rule_type="choice",
+        mode="choice",
         count=1,
     )
     policy = _single_mode_policy(rule)
@@ -232,13 +226,13 @@ def test_complex_conditions_and_logic():
     """Multiple conditions with 'and' logic."""
     rule = FailureRule(
-        entity_scope="node",
+        scope="node",
         conditions=[
-            FailureCondition(attr="equipment_vendor", operator="==", value="cisco"),
-            FailureCondition(attr="location", operator="==", value="dallas"),
+            Condition(attr="equipment_vendor", op="==", value="cisco"),
+            Condition(attr="location", op="==", value="dallas"),
         ],
         logic="and",
-        rule_type="all",
+        mode="all",
     )
     policy = _single_mode_policy(rule)
@@ -257,13 +251,13 @@ def test_complex_conditions_or_logic():
     """Multiple conditions with 'or' logic."""
     rule = FailureRule(
-        entity_scope="node",
+        scope="node",
         conditions=[
-            FailureCondition(attr="equipment_vendor", operator="==", value="cisco"),
-            FailureCondition(attr="location", operator="==", value="critical_site"),
+            Condition(attr="equipment_vendor", op="==", value="cisco"),
+            Condition(attr="location", op="==", value="critical_site"),
         ],
         logic="or",
-        rule_type="all",
+        mode="all",
     )
     policy = _single_mode_policy(rule)
@@ -285,18 +279,16 @@ def test_multiple_rules():
     """Policy with multiple rules affecting different entities."""
     node_rule = FailureRule(
-        entity_scope="node",
-        conditions=[
-            FailureCondition(attr="equipment_vendor", operator="==", value="cisco")
-        ],
+        scope="node",
+        conditions=[Condition(attr="equipment_vendor", op="==", value="cisco")],
         logic="and",
-        rule_type="all",
+        mode="all",
     )
     link_rule = FailureRule(
-        entity_scope="link",
-        conditions=[FailureCondition(attr="link_type", operator="==", value="fiber")],
+        scope="link",
+        conditions=[Condition(attr="link_type", op="==", value="fiber")],
         logic="and",
-        rule_type="all",
+        mode="all",
     )

     from ngraph.model.failure.policy import FailureMode
@@ -321,12 +313,10 @@ def test_condition_operators():
     """Test various condition operators."""
     # Test '!=' operator
     rule_neq = FailureRule(
-        entity_scope="node",
-        conditions=[
-            FailureCondition(attr="equipment_vendor", operator="!=", value="cisco")
-        ],
+        scope="node",
+        conditions=[Condition(attr="equipment_vendor", op="!=", value="cisco")],
         logic="and",
-        rule_type="all",
+        mode="all",
     )
     policy_neq = _single_mode_policy(rule_neq)
@@ -341,12 +331,10 @@
     # Test missing attribute
     rule_missing = FailureRule(
-        entity_scope="node",
-        conditions=[
-            FailureCondition(attr="missing_attr", operator="==", value="some_value")
-        ],
+        scope="node",
+        conditions=[Condition(attr="missing_attr", op="==", value="some_value")],
         logic="and",
-        rule_type="all",
+        mode="all",
     )
     policy_missing = _single_mode_policy(rule_missing)
@@ -361,12 +349,12 @@ def test_serialization():
     """Test policy serialization."""
-    condition = FailureCondition(attr="equipment_vendor", operator="==", value="cisco")
+    condition = Condition(attr="equipment_vendor", op="==", value="cisco")
     rule = FailureRule(
-        entity_scope="node",
+        scope="node",
         conditions=[condition],
         logic="and",
-        rule_type="random",
+        mode="random",
         probability=0.2,
         count=3,
     )
@@ -380,28 +368,26 @@
     assert len(mode_dict["rules"]) == 1

     rule_dict = mode_dict["rules"][0]
-    assert rule_dict["entity_scope"] == "node"
+    assert rule_dict["scope"] == "node"
     assert rule_dict["logic"] == "and"
-    assert rule_dict["rule_type"] == "random"
+    assert rule_dict["mode"] == "random"
     assert rule_dict["probability"] == 0.2
     assert rule_dict["count"] == 3
     assert len(rule_dict["conditions"]) == 1

     condition_dict = rule_dict["conditions"][0]
     assert condition_dict["attr"] == "equipment_vendor"
-    assert condition_dict["operator"] == "=="
+    assert condition_dict["op"] == "=="
     assert condition_dict["value"] == "cisco"


 def test_missing_attributes():
     """Test behavior when entities don't have required attributes."""
     rule = FailureRule(
-        entity_scope="node",
-        conditions=[
-            FailureCondition(attr="nonexistent_attr", operator="==", value="some_value")
-        ],
+        scope="node",
+        conditions=[Condition(attr="nonexistent_attr", op="==", value="some_value")],
         logic="and",
-        rule_type="all",
+        mode="all",
     )
     policy = _single_mode_policy(rule)
@@ -430,12 +416,10 @@ def test_empty_policy():
 def test_empty_entities():
     """Test policy applied to empty node/link sets."""
     rule = FailureRule(
-        entity_scope="node",
-        conditions=[
-            FailureCondition(attr="equipment_vendor", operator="==", value="cisco")
-        ],
+        scope="node",
+        conditions=[Condition(attr="equipment_vendor", op="==", value="cisco")],
         logic="and",
-        rule_type="all",
+        mode="all",
     )
     policy = _single_mode_policy(rule)
diff --git a/tests/model/failure/test_policy_expansion.py b/tests/model/failure/test_policy_expansion.py
index 8ab1a67..35a15b8 100644
--- a/tests/model/failure/test_policy_expansion.py
+++ b/tests/model/failure/test_policy_expansion.py
@@ -2,7 +2,8 @@

 from __future__ import annotations

-from ngraph.model.failure.policy import FailureCondition, FailurePolicy, FailureRule
+from ngraph.dsl.selectors.schema import Condition
+from ngraph.model.failure.policy import FailurePolicy, FailureRule


 def test_expand_by_shared_risk_groups() -> None:
@@ -19,18 +20,16 @@
     # Rule fails N1 explicitly
     rule = FailureRule(
-        entity_scope="node",
-        conditions=[
-            FailureCondition(attr="risk_groups", operator="contains", value="rg1")
-        ],
+        scope="node",
+        conditions=[Condition(attr="risk_groups", op="contains", value="rg1")],
         logic="and",
-        rule_type="all",
+        mode="all",
     )

     from ngraph.model.failure.policy import FailureMode

     policy = FailurePolicy(
-        modes=[FailureMode(weight=1.0, rules=[rule])], fail_risk_groups=True
+        modes=[FailureMode(weight=1.0, rules=[rule])], expand_groups=True
     )

     failed = set(policy.apply_failures(nodes, links))
@@ -47,15 +46,15 @@ def test_expand_failed_risk_group_children() -> None:
     # Rule selects top-level risk group name directly via risk_group scope
     rule = FailureRule(
-        entity_scope="risk_group",
-        conditions=[FailureCondition(attr="name", operator="==", value="parent")],
+        scope="risk_group",
+        conditions=[Condition(attr="name", op="==", value="parent")],
         logic="and",
-        rule_type="all",
+        mode="all",
     )

     from ngraph.model.failure.policy import FailureMode

     policy = FailurePolicy(
-        modes=[FailureMode(weight=1.0, rules=[rule])], fail_risk_group_children=True
+        modes=[FailureMode(weight=1.0, rules=[rule])], expand_children=True
     )

     # Risk group hierarchy as dicts (the policy supports dict objects for groups)
diff --git a/tests/model/failure/test_policy_set.py b/tests/model/failure/test_policy_set.py
index 0bfef39..fe4fce9 100644
--- a/tests/model/failure/test_policy_set.py
+++ b/tests/model/failure/test_policy_set.py
@@ -53,13 +53,13 @@ def test_to_dict_serialization(self):
         fps = FailurePolicySet()

         # Create a policy with some rules and attributes
-        rule = FailureRule(entity_scope="node", rule_type="choice", count=1)
+        rule = FailureRule(scope="node", mode="choice", count=1)

         from ngraph.model.failure.policy import FailureMode

         policy = FailurePolicy(
             modes=[FailureMode(weight=1.0, rules=[rule])],
             attrs={"name": "test_policy", "description": "Test policy"},
-            fail_risk_groups=True,
+            expand_groups=True,
         )
         fps.add("test", policy)
@@ -69,7 +69,7 @@
         assert "test" in result
         assert "modes" in result["test"]
         assert "attrs" in result["test"]
-        assert result["test"]["fail_risk_groups"] is True
+        assert result["test"]["expand_groups"] is True

         # Modes present
         assert "modes" in result["test"] and len(result["test"]["modes"]) == 1
@@ -77,8 +77,8 @@
         mode = result["test"]["modes"][0]
         assert len(mode["rules"]) == 1
         rule_dict = mode["rules"][0]
-        assert rule_dict["entity_scope"] == "node"
-        assert rule_dict["rule_type"] == "choice"
+        assert rule_dict["scope"] == "node"
+        assert rule_dict["mode"] == "choice"
         assert rule_dict["count"] == 1

     def test_to_dict_multiple_policies(self):
diff --git a/tests/model/failure/test_policy_weighted.py b/tests/model/failure/test_policy_weighted.py
index 1146bc4..d52a755 100644
--- a/tests/model/failure/test_policy_weighted.py
+++ b/tests/model/failure/test_policy_weighted.py
@@ -6,8 +6,8 @@ def test_weighted_choice_uses_weight_by_and_excludes_zero_weight_items() -> None
     selection should return only positive-weight items regardless of RNG.
     """
     rule = FailureRule(
-        entity_scope="link",
-        rule_type="choice",
+        scope="link",
+        mode="choice",
         count=2,
         weight_by="cost",
     )
@@ -31,8 +31,8 @@ def test_weighted_choice_fills_from_zero_when_insufficient_positive() -> None:
     uniformly from zero-weight items.
     """
     rule = FailureRule(
-        entity_scope="link",
-        rule_type="choice",
+        scope="link",
+        mode="choice",
         count=2,
         weight_by="cost",
     )
@@ -59,9 +59,9 @@ def test_weighted_modes_selects_positive_weight_mode_only() -> None:
     """With one zero-weight and one positive-weight mode, selection must use the positive-weight mode."""
     # Mode 0 (weight 0): link rule
-    link_rule = FailureRule(entity_scope="link", rule_type="choice", count=1)
+    link_rule = FailureRule(scope="link", mode="choice", count=1)
     # Mode 1 (weight 1): node rule
-    node_rule = FailureRule(entity_scope="node", rule_type="all")
+    node_rule = FailureRule(scope="node", mode="all")

     from ngraph.model.failure.policy import FailureMode
diff --git a/tests/model/test_risk_group_generation.py b/tests/model/test_risk_group_generation.py
index 1df8124..c176028 100644
--- a/tests/model/test_risk_group_generation.py
+++ b/tests/model/test_risk_group_generation.py
@@ -24,28 +24,25 @@ def test_generate_from_link_attribute(self) -> None:
           links:
             - source: NYC
              target: CHI
-              link_params:
-                attrs:
-                  fiber:
-                    path_id: "NYC-CHI"
+              attrs:
+                fiber:
+                  path_id: "NYC-CHI"
            - source: CHI
              target: LA
-              link_params:
-                attrs:
-                  fiber:
-                    path_id: "CHI-LA"
+              attrs:
+                fiber:
+                  path_id: "CHI-LA"
            - source: NYC
              target: LA
-              link_params:
-                attrs:
-                  fiber:
-                    path_id: "NYC-CHI"
+              attrs:
+                fiber:
+                  path_id: "NYC-CHI"

         risk_groups:
           - generate:
-              entity_scope: link
+              scope: link
               group_by: fiber.path_id
-              name_template: Path_${value}
+              name: Path_${value}
         """

         scenario = Scenario.from_yaml(yaml_content)
@@ -79,9 +76,9 @@ def test_generate_from_node_attribute(self) -> None:
         risk_groups:
           - generate:
-              entity_scope: node
+              scope: node
               group_by: facility.building_id
-              name_template: Building_${value}
+              name: Building_${value}
         """

         scenario = Scenario.from_yaml(yaml_content)
@@ -104,22 +101,20 @@ def test_generate_with_attrs(self) -> None:
           links:
            - source: NYC
              target: CHI
-              link_params:
-                attrs:
-                  fiber:
-                    conduit_id: "NYC-CHI-C1"
+              attrs:
+                fiber:
+                  conduit_id: "NYC-CHI-C1"
            - source: CHI
              target: NYC
-              link_params:
-                attrs:
-                  fiber:
-                    conduit_id: "NYC-CHI-C2"
+              attrs:
+                fiber:
+                  conduit_id: "NYC-CHI-C2"

         risk_groups:
           - generate:
-              entity_scope: link
+              scope: link
               group_by: fiber.conduit_id
-              name_template: Conduit_${value}
+              name: Conduit_${value}
               attrs:
                 type: fiber_conduit
                 failure_probability: 0.001
@@ -155,9 +150,9 @@ def test_generate_with_nested_attribute(self) -> None:
         risk_groups:
           - generate:
-              entity_scope: node
+              scope: node
               group_by: facility.power_zone
-              name_template: PowerZone_${value}
+              name: PowerZone_${value}
         """

         scenario = Scenario.from_yaml(yaml_content)
@@ -191,9 +186,9 @@ def test_generate_no_matches(self) -> None:
         risk_groups:
           - generate:
-              entity_scope: node
+              scope: node
               group_by: facility.nonexistent
-              name_template: Missing_${value}
+              name: Missing_${value}
         """

         scenario = Scenario.from_yaml(yaml_content)
@@ -221,9 +216,9 @@ def test_generate_with_explicit_groups(self) -> None:
         risk_groups:
           - "Building_DC1_Manual"
           - generate:
-              entity_scope: node
+              scope: node
               group_by: facility.room_id
-              name_template: Room_${value}
+              name: Room_${value}
         """

         scenario = Scenario.from_yaml(yaml_content)
@@ -254,13 +249,13 @@ def test_generate_multiple_blocks(self) -> None:
         risk_groups:
           - generate:
-              entity_scope: node
+              scope: node
               group_by: facility.building_id
-              name_template: Building_${value}
+              name: Building_${value}
           - generate:
-              entity_scope: node
+              scope: node
               group_by: facility.power_zone
-              name_template: PowerZone_${value}
+              name: PowerZone_${value}
         """

         scenario = Scenario.from_yaml(yaml_content)
@@ -294,9 +289,9 @@ def test_generate_null_values_skipped(self) -> None:
         risk_groups:
           - generate:
-              entity_scope: node
+              scope: node
               group_by: facility.building_id
-              name_template: Building_${value}
+              name: Building_${value}
         """

         scenario = Scenario.from_yaml(yaml_content)
@@ -328,16 +323,16 @@ def test_missing_group_by(self) -> None:
         risk_groups:
           - generate:
-              entity_scope: node
-              name_template: Test_${value}
+              scope: node
+              name: Test_${value}
         """

         with pytest.raises(jsonschema.ValidationError) as exc_info:
             Scenario.from_yaml(yaml_content)

         assert "group_by" in str(exc_info.value)

-    def test_missing_name_template(self) -> None:
-        """Error when name_template is missing (schema validation)."""
+    def test_missing_name(self) -> None:
+        """Error when name is missing (schema validation)."""
         import jsonschema

         yaml_content = """
@@ -347,16 +342,16 @@ def test_missing_name_template(self) -> None:
         risk_groups:
           - generate:
-              entity_scope: node
+              scope: node
               group_by: facility.building_id
         """

         with pytest.raises(jsonschema.ValidationError) as exc_info:
             Scenario.from_yaml(yaml_content)

-        assert "name_template" in str(exc_info.value)
+        assert "'name' is a required property" in str(exc_info.value)

-    def test_name_template_without_placeholder(self) -> None:
-        """Error when name_template lacks ${value} placeholder."""
+    def test_name_without_placeholder(self) -> None:
+        """Error when name lacks ${value} placeholder."""
         yaml_content = """
         network:
           nodes:
@@ -367,9 +362,9 @@ def test_name_template_without_placeholder(self) -> None:
         risk_groups:
           - generate:
-              entity_scope: node
+              scope: node
               group_by: facility.building_id
-              name_template: StaticBuildingName
+              name: StaticBuildingName
         """
         with pytest.raises(ValueError) as exc_info:
             Scenario.from_yaml(yaml_content)
@@ -393,9 +388,9 @@ def test_generated_name_collision_with_explicit(self) -> None:
             explicit: true
           # Generate block that produces same name
           - generate:
-              entity_scope: node
+              scope: node
               group_by: facility.building_id
-              name_template: Building_${value}
+              name: Building_${value}
         """
         with pytest.raises(ValueError) as exc_info:
             Scenario.from_yaml(yaml_content)
@@ -417,13 +412,13 @@ def test_generated_name_collision_between_generate_blocks(self) -> None:
         risk_groups:
           - generate:
-              entity_scope: node
+              scope: node
               group_by: facility.building_id
-              name_template: Site_${value}
+              name: Site_${value}
           - generate:
-              entity_scope: node
+              scope: node
               group_by: facility.campus_id
-              name_template: Site_${value}
+              name: Site_${value}
         """
         with pytest.raises(ValueError) as exc_info:
             Scenario.from_yaml(yaml_content)
diff --git a/tests/model/test_risk_group_membership.py b/tests/model/test_risk_group_membership.py
index 20d1ddd..bd26d2b 100644
--- a/tests/model/test_risk_group_membership.py
+++ b/tests/model/test_risk_group_membership.py
@@ -40,11 +40,11 @@ def test_node_membership_simple(self) -> None:
         risk_groups:
           - name: PowerZone_DC1_R1_PZA
             membership:
-              entity_scope: node
+              scope: node
               match:
                 conditions:
                   - attr: facility.power_zone
-                    operator: "=="
+                    op: "=="
                     value: "DC1-R1-PZ-A"
         """
         scenario = Scenario.from_yaml(yaml_content)
@@ -75,25 +75,23 @@ def test_link_membership_simple(self) -> None:
           links:
            - source: NYC
              target: CHI
-              link_params:
-                attrs:
-                  fiber:
-                    conduit_id: "NYC-CHI-C1"
+              attrs:
+                fiber:
+                  conduit_id: "NYC-CHI-C1"
            - source: CHI
              target: LA
-              link_params:
-                attrs:
-                  fiber:
-                    conduit_id: "CHI-LA-C1"
+              attrs:
+                fiber:
+                  conduit_id: "CHI-LA-C1"

         risk_groups:
           - name: Conduit_NYC_CHI_C1
             membership:
-              entity_scope: link
+              scope: link
               match:
                 conditions:
                   - attr: fiber.conduit_id
-                    operator: "=="
+                    op: "=="
                     value: "NYC-CHI-C1"
         """
         scenario = Scenario.from_yaml(yaml_content)
@@ -127,11 +125,11 @@ def test_risk_group_hierarchy_membership(self) -> None:
                 path_id: "NYC-LA"
           - name: Path_NYC_CHI
             membership:
-              entity_scope: risk_group
+              scope: risk_group
               match:
                 conditions:
                   - attr: fiber.path_id
-                    operator: "=="
+                    op: "=="
                     value: "NYC-CHI"
         """
         scenario = Scenario.from_yaml(yaml_content)
@@ -172,15 +170,15 @@ def test_and_logic_all_must_match(self) -> None:
         risk_groups:
           - name: Room1_PowerZoneA
             membership:
-              entity_scope: node
+              scope: node
               match:
                 logic: and
                 conditions:
                   - attr: facility.room_id
-                    operator: "=="
+                    op: "=="
                     value: "DC1-R1"
                   - attr: facility.power_zone
-                    operator: "=="
+                    op: "=="
                     value: "DC1-R1-PZ-A"
         """
         scenario = Scenario.from_yaml(yaml_content)
@@ -210,35 +208,32 @@ def test_or_logic_any_can_match(self) -> None:
           links:
            - source: NYC
              target: CHI
-              link_params:
-                attrs:
-                  fiber:
-                    conduit_id: "NYC-CHI-C1"
+              attrs:
+                fiber:
+                  conduit_id: "NYC-CHI-C1"
            - source: NYC
              target: LA
-              link_params:
-                attrs:
-                  fiber:
-                    conduit_id: "NYC-LA-C1"
+              attrs:
+                fiber:
+                  conduit_id: "NYC-LA-C1"
            - source: CHI
              target: LA
-              link_params:
-                attrs:
-                  fiber:
-                    conduit_id: "CHI-LA-C1"
+              attrs:
+                fiber:
+                  conduit_id: "CHI-LA-C1"

         risk_groups:
           - name: Path_NYC_Outbound
             membership:
-              entity_scope: link
+              scope: link
               match:
                 logic: or
                 conditions:
                   - attr: fiber.conduit_id
-                    operator: "=="
+                    op: "=="
                     value: "NYC-CHI-C1"
                   - attr: fiber.conduit_id
-                    operator: "=="
+                    op: "=="
                     value: "NYC-LA-C1"
         """
         scenario = Scenario.from_yaml(yaml_content)
@@ -279,11 +274,11 @@ def test_dot_notation_in_match(self) -> None:
         risk_groups:
           - name: Building_DC1
             membership:
-              entity_scope: node
+              scope: node
               match:
                 conditions:
                   - attr: facility.building_id
-                    operator: "=="
+                    op: "=="
                     value: "DC1"
         """
         scenario = Scenario.from_yaml(yaml_content)
@@ -323,11 +318,11 @@ def test_no_matches(self) -> None:
         risk_groups:
           - name: Building_DC99
             membership:
-              entity_scope: node
+              scope: node
               match:
                 conditions:
                   - attr: facility.building_id
-                    operator: "=="
+                    op: "=="
                     value: "DC99"
         """
         scenario = Scenario.from_yaml(yaml_content)
@@ -348,11 +343,11 @@ def test_self_reference_avoided(self) -> None:
             attrs:
               type: meta
             membership:
-              entity_scope: risk_group
+              scope: risk_group
               match:
                 conditions:
                   - attr: name
-                    operator: any_value
+                    op: exists
         """

         scenario = Scenario.from_yaml(yaml_content)
@@ -375,11 +370,11 @@ def test_membership_combines_with_explicit(self) -> None:
           - name: CoolingZone_DC1_R1_CZA
           - name: PowerZone_DC1_R1_PZA
             membership:
-              entity_scope: node
+              scope: node
               match:
                 conditions:
                   - attr: facility.power_zone
-                    operator: "=="
+                    op: "=="
                     value: "DC1-R1-PZ-A"
         """
         scenario = Scenario.from_yaml(yaml_content)
@@ -403,25 +398,23 @@ def test_contains_operator(self) -> None:
           links:
            - source: NYC
              target: CHI
-              link_params:
-                attrs:
-                  fiber:
-                    pair_ids: ["FP01", "FP02", "FP03"]
+              attrs:
+                fiber:
+                  pair_ids: ["FP01", "FP02", "FP03"]
            - source: CHI
              target: NYC
-              link_params:
-                attrs:
-                  fiber:
-                    pair_ids: ["FP04", "FP05"]
+              attrs:
+                fiber:
+                  pair_ids: ["FP04", "FP05"]

         risk_groups:
           - name: FiberPair_FP01
             membership:
-              entity_scope: link
+              scope: link
               match:
                 conditions:
                   - attr: fiber.pair_ids
-                    operator: contains
+                    op: contains
                     value: "FP01"
         """
         scenario = Scenario.from_yaml(yaml_content)
@@ -454,11 +447,11 @@ def test_in_operator(self) -> None:
         risk_groups:
           - name: Campus_East
             membership:
-              entity_scope: node
+              scope: node
               match:
                 conditions:
                   - attr: facility.building_id
-                    operator: in
+                    op: in
                     value: ["DC1", "DC2"]
         """
         scenario = Scenario.from_yaml(yaml_content)
@@ -478,31 +471,28 @@ def test_numeric_comparison(self) -> None:
           links:
            - source: NYC
              target: CHI
-              link_params:
-                attrs:
-                  fiber:
-                    distance_km: 1200
+              attrs:
+                fiber:
+                  distance_km: 1200
            - source: NYC
              target: LA
-              link_params:
-                attrs:
-                  fiber:
-                    distance_km: 4000
+              attrs:
+                fiber:
+                  distance_km: 4000
            - source: CHI
              target: LA
-              link_params:
-                attrs:
-                  fiber:
-                    distance_km: 2800
+              attrs:
+                fiber:
+                  distance_km: 2800

         risk_groups:
           - name: LongHaulFiber
             membership:
-              entity_scope: link
+              scope: link
               match:
                 conditions:
                   - attr: fiber.distance_km
-                    operator: ">="
+                    op: ">="
                     value: 2000
         """
         scenario = Scenario.from_yaml(yaml_content)
@@ -538,22 +528,22 @@ def test_direct_mutual_membership_cycle_detected(self) -> None:
               type: path
               route: "NYC-CHI"
             membership:
-              entity_scope: risk_group
+              scope: risk_group
               match:
                 conditions:
                   - attr: route
-                    operator: "=="
+                    op: "=="
                     value: "NYC-CHI"
           - name: Conduit_NYC_CHI_C1
             attrs:
               type: conduit
               route: "NYC-CHI"
             membership:
-              entity_scope: risk_group
+              scope: risk_group
               match:
                 conditions:
                   - attr: type
-                    operator: "=="
+                    op: "=="
                     value: path
         """
         with pytest.raises(ValueError) as exc_info:
@@ -576,31 +566,31 @@ def test_transitive_cycle_detected(self) -> None:
             attrs:
               tier: 1
             membership:
-              entity_scope: risk_group
+              scope: risk_group
               match:
                 conditions:
                   - attr: tier
-                    operator: "=="
+                    op: "=="
                     value: 3
           - name: GroupB
             attrs:
               tier: 2
             membership:
-              entity_scope: risk_group
+              scope: risk_group
               match:
                 conditions:
                   - attr: tier
-                    operator: "=="
+                    op: "=="
                     value: 1
           - name: GroupC
             attrs:
               tier: 3
             membership:
-              entity_scope: risk_group
+              scope: risk_group
               match:
                 conditions:
                   - attr: tier
-                    operator: "=="
+                    op: "=="
                     value: 2
         """
         with pytest.raises(ValueError) as exc_info:
@@ -631,11 +621,11 @@ def test_valid_hierarchy_no_cycle(self) -> None:
                 path_id: "NYC-LA"
           - name: Path_NYC_CHI
             membership:
-              entity_scope: risk_group
+              scope: risk_group
               match:
                 conditions:
                   - attr: fiber.path_id
-                    operator: "=="
+                    op: "=="
                     value: "NYC-CHI"
         """
         # Should not raise
diff --git a/tests/scenario/test_scenario.py b/tests/scenario/test_scenario.py
index e354741..69c890a 100644
--- a/tests/scenario/test_scenario.py
+++ b/tests/scenario/test_scenario.py
@@ -80,47 +80,45 @@ def valid_scenario_yaml() -> str:
   links:
     - source: NodeA
       target: NodeB
-      link_params:
-        capacity: 10
-        cost: 5
-        attrs:
-          type: link
-          some_attr: some_value
+      capacity: 10
+      cost: 5
+      attrs:
+        type: link
+        some_attr: some_value
    - source: NodeB
       target: NodeC
-      link_params:
-        capacity: 20
-        cost: 4
-        attrs:
-          type: link
+      capacity: 20
+      cost: 4
+      attrs:
+        type: link
-failure_policy_set:
+failures:
   default:
     attrs:
       name: "multi_rule_example"
       description: "Testing modal policy."
-    fail_risk_groups: false
-    fail_risk_group_children: false
+    expand_groups: false
+    expand_children: false
     modes:
       - weight: 1.0
         rules:
-        - entity_scope: node
-          rule_type: "choice"
+        - scope: node
+          mode: "choice"
           count: 1
-        - entity_scope: link
-          rule_type: "all"
+        - scope: link
+          mode: "all"
-traffic_matrix_set:
+demands:
   default:
     - source: NodeA
-      sink: NodeB
-      demand: 15
+      target: NodeB
+      volume: 15
    - source: NodeA
-      sink: NodeC
-      demand: 5
+      target: NodeC
+      volume: 5
 workflow:
-  - step_type: DoSmth
+  - type: DoSmth
     name: Step1
     some_param: 42
-  - step_type: DoSmthElse
+  - type: DoSmthElse
     name: Step2
     factor: 2.0
 """
@@ -129,7 +127,7 @@
 @pytest.fixture
 def missing_step_type_yaml() -> str:
     """
-    Returns a YAML string missing the 'step_type' in the workflow,
+    Returns a YAML string missing the 'type' in the workflow,
     which should raise a ValueError.
     """
     return """
@@ -140,18 +138,17 @@ def missing_step_type_yaml() -> str:
   links:
     - source: NodeA
       target: NodeB
-      link_params:
-        capacity: 1
+      capacity: 1
-failure_policy_set:
+failures:
   default:
     modes:
       - weight: 1.0
         rules: []
-traffic_matrix_set:
+demands:
   default:
     - source: NodeA
-      sink: NodeB
-      demand: 10
+      target: NodeB
+      volume: 10
 workflow:
   - name: StepWithoutType
     some_param: 123
@@ -172,20 +169,19 @@ def unrecognized_step_type_yaml() -> str:
   links:
     - source: NodeA
       target: NodeB
-      link_params:
-        capacity: 1
+      capacity: 1
-failure_policy_set:
+failures:
   default:
     modes:
       - weight: 1.0
         rules: []
-traffic_matrix_set:
+demands:
   default:
     - source: NodeA
-      sink: NodeB
-      demand: 10
+      target: NodeB
+      volume: 10
 workflow:
-  - step_type: NonExistentStep
+  - type: NonExistentStep
     name: BadStep
     some_param: 999
 """
@@ -205,16 +201,15 @@ def extra_param_yaml() -> str:
   links:
     - source: NodeA
       target: NodeB
-      link_params:
-        capacity: 1
+      capacity: 1
-traffic_matrix_set: {}
-failure_policy_set:
+demands: {}
+failures:
   default:
     modes:
       - weight: 1.0
         rules: []
 workflow:
-  - step_type: DoSmth
+  - type: DoSmth
     name: StepWithExtra
     some_param: 42
     extra_param: 99
@@ -229,7 +224,7 @@ def minimal_scenario_yaml() -> str:
     """
     return """
 workflow:
-  - step_type: DoSmth
+  - type: DoSmth
     name: JustStep
     some_param: 100
 """
@@ -290,8 +285,8 @@ def test_scenario_from_yaml_valid(valid_scenario_yaml: str) -> None:
     # Check failure policy - access by known name "default"
     simple_policy = scenario.failure_policy_set.get_policy("default")
     assert isinstance(simple_policy, FailurePolicy)
-    assert not simple_policy.fail_risk_groups
-    assert not simple_policy.fail_risk_group_children
+    assert not simple_policy.expand_groups
+    assert not simple_policy.expand_children
     assert len(simple_policy.modes) == 1
     assert simple_policy.attrs.get("name") == "multi_rule_example"
@@ -301,22 +296,22 @@ def test_scenario_from_yaml_valid(valid_scenario_yaml: str) -> None:
     mode = simple_policy.modes[0]
     assert len(mode.rules) == 2
     r1, r2 = mode.rules
-    assert r1.entity_scope == "node" and r1.rule_type == "choice" and r1.count == 1
-    assert r2.entity_scope == "link" and r2.rule_type == "all"
+    assert r1.scope == "node" and r1.mode == "choice" and r1.count == 1
+    assert r2.scope == "link" and r2.mode == "all"

     # Check traffic matrix set
-    assert len(scenario.traffic_matrix_set.matrices) == 1
-    assert "default" in scenario.traffic_matrix_set.matrices
-    default_demands = scenario.traffic_matrix_set.matrices["default"]
+    assert len(scenario.demand_set.sets) == 1
+    assert "default" in scenario.demand_set.sets
+    default_demands = scenario.demand_set.sets["default"]
     assert len(default_demands) == 2
     d1 = default_demands[0]
     d2 = default_demands[1]
     assert d1.source == "NodeA"
-    assert d1.sink == "NodeB"
-    assert d1.demand == 15
+    assert d1.target == "NodeB"
+    assert d1.volume == 15
     assert d2.source == "NodeA"
-    assert d2.sink == "NodeC"
-    assert d2.demand == 5
+    assert d2.target == "NodeC"
+    assert d2.volume == 5

     # Check workflow
     assert len(scenario.workflow) == 2
@@ -349,7 +344,7 @@ def test_scenario_run(valid_scenario_yaml: str) -> None:
 def test_scenario_from_yaml_missing_step_type(missing_step_type_yaml: str) -> None:
     """
     Tests that Scenario.from_yaml raises an error if a workflow step
-    is missing the 'step_type' field.
+    is missing the 'type' field.

     Schema validation catches this and raises ValidationError.
     """
@@ -392,7 +387,7 @@ def test_scenario_minimal(minimal_scenario_yaml: str) -> None:
     # If no failure_policy_set block, scenario.failure_policy_set has no policies
     assert len(scenario.failure_policy_set.get_all_policies()) == 0
-    assert len(scenario.traffic_matrix_set.matrices) == 0
+    assert len(scenario.demand_set.sets) == 0
     assert len(scenario.workflow) == 1
     step = scenario.workflow[0]
     assert step.name == "JustStep"
@@ -409,7 +404,7 @@ def test_scenario_empty_yaml(empty_yaml: str) -> None:
     assert len(scenario.network.nodes) == 0
     assert len(scenario.network.links) == 0
     assert len(scenario.failure_policy_set.get_all_policies()) == 0
-    assert len(scenario.traffic_matrix_set.matrices) == 0
+    assert len(scenario.demand_set.sets) == 0
     assert scenario.workflow == []
@@ -427,19 +422,17 @@ def test_scenario_risk_groups() -> None:
   links:
     - source: NodeA
       target: NodeB
-      link_params:
-        capacity: 10
-        cost: 5
-        attrs:
-          type: link
-          some_attr: some_value
+      capacity: 10
+      cost: 5
+      attrs:
+        type: link
+        some_attr: some_value
    - source: NodeB
       target: NodeC
-      link_params:
-        capacity: 20
-        cost: 4
-        attrs:
-          type: link
+      capacity: 20
+      cost: 4
+      attrs:
+        type: link
 risk_groups:
   - name: "RG1"
     disabled: false
@@ -500,10 +493,9 @@ def test_yaml_anchors_and_aliases():
   links:
     - source: N1
       target: N2
-      link_params:
-        capacity: *default_cap
+      capacity: *default_cap

-traffic_matrix_set:
+demands:
   default: []
 """
diff --git a/tests/scenario/test_scenario_modes.py b/tests/scenario/test_scenario_modes.py
index e128fee..8b79a76 100644
--- a/tests/scenario/test_scenario_modes.py
+++ b/tests/scenario/test_scenario_modes.py
@@ -12,32 +12,31 @@ def test_scenario_parses_modes_and_weight_by() -> None:
   links:
     - source: A
       target: B
-      link_params:
-        capacity: 100
-        cost: 10
+      capacity: 100
+      cost: 10

-failure_policy_set:
-  weighted_modes_v1:
+failures:
+  weighted_modes:
     modes:
       - weight: 0.6
         rules:
-          - entity_scope: "link"
-            rule_type: "choice"
+          - scope: "link"
+            mode: "choice"
             count: 1
             weight_by: "cost"
       - weight: 0.4
         rules:
-          - entity_scope: "node"
-            rule_type: "choice"
+          - scope: "node"
+            mode: "choice"
             count: 1

 workflow:
-  - step_type: BuildGraph
+  - type: BuildGraph
     name: build
 """

     scenario = Scenario.from_yaml(scenario_yaml)
-    policy = scenario.failure_policy_set.get_policy("weighted_modes_v1")
+    policy = scenario.failure_policy_set.get_policy("weighted_modes")
     # Ensure modes parsed and stored
     assert policy.modes and len(policy.modes) == 2
     # Ensure weight_by propagated into rule
diff --git a/tests/scenario/test_schema_validation.py b/tests/scenario/test_schema_validation.py
index 0fdb4c5..dd5d27e 100644
--- a/tests/scenario/test_schema_validation.py
+++ b/tests/scenario/test_schema_validation.py
@@ -39,39 +39,37 @@ def test_schema_validates_simple_scenario(self, schema):
   links:
     - source: A
       target: B
-      link_params:
-        capacity: 1000.0
-        cost: 10
-        risk_groups: ["link_rg_1"]
+      capacity: 1000.0
+      cost: 10
+      risk_groups: ["link_rg_1"]
    - source: B
       target: C
-      link_params:
-        capacity: 1000.0
-        cost: 10
-        risk_groups: ["link_rg_2"]
+      capacity: 1000.0
+      cost: 10
+      risk_groups: ["link_rg_2"]

 risk_groups:
   - name: link_rg_1
   - name: link_rg_2

-failure_policy_set:
+failures:
   default:
     attrs:
       description: "Test single link failure policy"
     modes:
       - weight: 1.0
         rules:
-        - entity_scope: "link"
-          rule_type: "choice"
+        - scope: "link"
+          mode: "choice"
           count: 1

 workflow:
-  - step_type: BuildGraph
+  - type: BuildGraph
     name: build_graph
-  - step_type: CapacityEnvelopeAnalysis
+  - type: CapacityEnvelopeAnalysis
     name: capacity_test
     source: "A"
-    sink: "C"
+    target: "C"
     iterations: 1
     baseline: false
     failure_policy: null
@@ -147,7 +145,7 @@ def test_schema_validates_vars_section(self, schema):
         jsonschema.validate(valid_data, schema)

     def test_schema_validates_link_risk_groups(self, schema):
-        """Test that the schema validates risk_groups in link_params."""
+        """Test that the schema validates risk_groups in links."""
         valid_data = {
             "network": {
                 "nodes": {"A": {}, "B": {}},
@@ -155,11 +153,9 @@ def test_schema_validates_link_risk_groups(self, schema):
                     {
                         "source": "A",
                         "target": "B",
-                        "link_params": {
-                            "capacity": 100,
-                            "cost": 1,
-                            "risk_groups": ["rg1", "rg2"],
-                        },
+                        "capacity": 100,
+                        "cost": 1,
+                        "risk_groups": ["rg1", "rg2"],
                     }
                 ],
             }
@@ -172,7 +168,7 @@ def test_schema_validates_failure_policy_structure(self, schema):
         """Test that the schema validates failure policy structure."""
         valid_data = {
             "network": {"nodes": {}, "links": []},
-            "failure_policy_set": {
+            "failures": {
                 "default": {
                     "attrs": {"name": "test_policy"},
                     "modes": [
@@ -180,8 +176,8 @@
                             "weight": 1.0,
                             "rules": [
                                 {
-                                    "entity_scope": "link",
-                                    "rule_type": "choice",
+                                    "scope": "link",
+                                    "mode": "choice",
                                     "count": 1,
                                 }
                             ],
@@ -199,47 +195,45 @@ def test_schema_validates_blueprints_and_groups(self, schema):
         blueprint_scenario = """
 blueprints:
   clos_2tier:
-    groups:
+    nodes:
       leaf:
-        node_count: 4
-        name_template: "leaf-{node_num}"
+        count: 4
+        template: "leaf-{n}"
         attrs:
           role: "leaf"
       spine:
-        node_count: 2
-        name_template: "spine-{node_num}"
+        count: 2
+        template: "spine-{n}"
         attrs:
           role: "spine"
-    adjacency:
+    links:
       - source: "/leaf"
         target: "/spine"
         pattern: "mesh"
-        link_params:
-          capacity: 40000.0
-          cost: 1000
+        capacity: 40000.0
+        cost: 1000

 network:
   name: Blueprint Test Network
-  groups:
+  nodes:
     fabric:
-      use_blueprint: "clos_2tier"
-      parameters:
-        leaf.node_count: 6
+      blueprint: "clos_2tier"
+      params:
+        leaf.count: 6

 workflow:
-  - step_type: BuildGraph
+  - type: BuildGraph
     name: build_graph
 """
         data = yaml.safe_load(blueprint_scenario)
         jsonschema.validate(data, schema)

-    def test_schema_validates_adjacency_selector_objects(self, schema):
-        """Adjacency with selector objects for source/target should validate."""
+    def test_schema_validates_link_selector_objects(self, schema):
+        """Links with selector objects for source/target should validate."""
         valid = {
             "network": {
                 "nodes": {},
-                "links": [],
-                "adjacency": [
+                "links": [
                     {
                         "source": {
                             "path": "/A",
@@ -248,7 +242,7 @@
                             "match": {
                                 "conditions": [
                                     {
                                         "attr": "role",
-                                        "operator": "==",
+                                        "op": "==",
                                         "value": "compute",
                                     }
                                 ]
@@ -267,26 +261,25 @@ def test_schema_validates_node_link_overrides(self, schema):
         override_scenario = """
 network:
   name: Override Test Network
-  groups:
+  nodes:
     servers:
-      node_count: 4
-      name_template: "srv-{node_num}"
+      count: 4
+      template: "srv-{n}"

-  node_overrides:
+  node_rules:
     - path: "srv-[12]"
       attrs:
         hw_type: "gpu_server"
         gpu_count: 8
       risk_groups: ["gpu_srg"]

-  link_overrides:
+  link_rules:
     - source: "srv-1"
       target: "srv-2"
-      link_params:
-        capacity: 100000.0
-        attrs:
-          link_type: "high_bandwidth"
+      capacity: 100000.0
+      attrs:
+        link_type: "high_bandwidth"

 workflow:
-  - step_type: BuildGraph
+  - type: BuildGraph
     name: build_graph
 """
         data = yaml.safe_load(override_scenario)
@@ -316,7 +309,7 @@ def test_schema_validates_components(self, schema):
         hw_component: "ToRSwitch48p"

 workflow:
-  - step_type: BuildGraph
+  - type: BuildGraph
     name: build_graph
 """
         data = yaml.safe_load(components_scenario)
@@ -325,30 +318,31 @@ def test_schema_validates_complex_failure_policies(self, schema):
         """Test that the schema validates complex failure policies with conditions."""
         complex_failure_scenario = """
-failure_policy_set:
+failures:
   conditional_failure:
     modes:
       - weight: 1.0
         rules:
-        - entity_scope: "node"
-          rule_type: "choice"
+        - scope: "node"
+          mode: "choice"
           count: 2
-          conditions:
-            - attr: "attrs.role"
-              operator: "=="
-              value: "spine"
-            - attr: "attrs.criticality"
-              operator: ">="
-              value: 5
-          logic: "and"
+          match:
+            logic: "and"
+            conditions:
+              - attr: "attrs.role"
+                op: "=="
+                value: "spine"
+              - attr: "attrs.criticality"
+                op: ">="
+                value: 5

   risk_group_failure:
-    fail_risk_groups: true
-    fail_risk_group_children: true
+    expand_groups: true
+    expand_children: true
     modes:
       - weight: 1.0
         rules:
-        - entity_scope: "risk_group"
-          rule_type: "choice"
+        - scope: "risk_group"
+          mode: "choice"
           count: 1

 network:
@@ -364,7 +358,7 @@
         criticality: 3

 workflow:
-  - step_type: BuildGraph
+  - type: BuildGraph
     name: build_graph
 """
         data = yaml.safe_load(complex_failure_scenario)
@@ -373,21 +367,21 @@ def test_schema_validates_traffic_matrices(self, schema):
         """Test that the schema validates complex traffic matrices."""
         traffic_scenario = """
-traffic_matrix_set:
+demands:
   default:
     - source: "^spine.*"
-      sink: "^leaf.*"
-      demand: 1000.0
+      target: "^leaf.*"
+      volume: 1000.0
       mode: "combine"
       priority: 1
       attrs:
         traffic_type: "north_south"
   hpc_workload:
     - source: "compute.*"
-      sink: "storage.*"
-      demand: 5000.0
+      target: "storage.*"
+      volume: 5000.0
       mode: "pairwise"
-      flow_policy_config:
+      flow_policy:
         shortest_path: false
         flow_placement: "EQUAL_BALANCED"
@@ -400,12 +394,12 @@
     storage1: {}

 workflow:
-  - step_type: BuildGraph
+  - type: BuildGraph
     name: build_graph
-  - step_type: CapacityEnvelopeAnalysis
+  - type: CapacityEnvelopeAnalysis
     name: capacity_test
     source: "spine1"
-    sink: "leaf1"
+    target: "leaf1"
     iterations: 1
     baseline: false
     failure_policy: null
@@ -415,36 +409,36 @@ def test_schema_validates_traffic_matrices(self, schema):
         jsonschema.validate(data, schema)

     def test_schema_validates_variable_expansion(self, schema):
-        """Test that the schema validates variable expansion in adjacency."""
+        """Test that the schema validates variable expansion in links."""
         expansion_scenario = """
 blueprints:
   datacenter:
-    groups:
+    nodes:
       rack:
-        node_count: 4
-        name_template: "rack{rack_id}-{node_num}"
+        count: 4
+        template: "rack{rack_id}-{n}"
       spine:
-        node_count: 2
-        name_template: "spine-{node_num}"
+        count: 2
+        template: "spine-{n}"
-    adjacency:
+    links:
       - source: "/rack"
         target: "/spine"
         pattern: "mesh"
-        expand_vars:
-          rack_id: [1, 2, 3]
-        expansion_mode: "cartesian"
-        link_params:
-          capacity: 25000.0
-          cost: 1
+        expand:
+          vars:
+            rack_id: [1, 2, 3]
+          mode: "cartesian"
+        capacity: 25000.0
+        cost: 1

 network:
   name: Variable Expansion Test
-  groups:
+  nodes:
     dc1:
-      use_blueprint: "datacenter"
+      blueprint: "datacenter"

 workflow:
-  - step_type: BuildGraph
+  - type: BuildGraph
     name: build_graph
 """
         data = yaml.safe_load(expansion_scenario)
@@ -462,16 +456,15 @@ def test_schema_consistency_with_netgraph_validation(self, schema):
   links:
     - source: A
       target: B
-      link_params:
-        capacity: 100
-        cost: 1
-        risk_groups: ["test_rg"]
+      capacity: 100
+      cost: 1
+      risk_groups: ["test_rg"]

 risk_groups:
   - name: test_rg

 workflow:
-  - step_type: BuildGraph
+  - type: BuildGraph
     name: build
 """
         data = yaml.safe_load(valid_yaml)
diff --git a/tests/utils/test_boolean_keys.py b/tests/utils/test_boolean_keys.py
index 02cd62c..39c989b 100644
--- a/tests/utils/test_boolean_keys.py
+++ b/tests/utils/test_boolean_keys.py
@@ -85,57 +85,57 @@ def test_yaml_boolean_keys_converted_to_strings():
     yml = textwrap.dedent("""
         network:
           name: test
-        traffic_matrix_set:
+        demands:
           # Regular string key
           peak:
             - source: "^A$"
-              sink: "^B$"
-              demand: 100
+              target: "^B$"
+              volume: 100
           # YAML 1.1 boolean keys - these get parsed as Python booleans
           true:
             - source: "^C$"
-              sink: "^D$"
-              demand: 200
+              target: "^D$"
+              volume: 200
          false:
             - source: "^E$"
-              sink: "^F$"
-              demand: 50
+              target: "^F$"
+              volume: 50
          yes:
             - source: "^G$"
-              sink: "^H$"
-              demand: 25
+              target: "^H$"
+              volume: 25
          no:
             - source: "^I$"
-              sink: "^J$"
-              demand: 75
+              target: "^J$"
+              volume: 75
          on:
             - source: "^K$"
-              sink: "^L$"
-              demand: 150
+              target: "^L$"
+              volume: 150
          off:
             - source: "^M$"
-              sink: "^N$"
-              demand: 125
+              target: "^N$"
+              volume: 125
     """)

     scenario = Scenario.from_yaml(yml)
-    matrices = scenario.traffic_matrix_set.matrices
+    matrices = scenario.demand_set.sets

     # All YAML boolean values collapse to just True/False, then converted to strings
     assert set(matrices.keys()) == {"peak", "True", "False"}

     # Regular string key
-    assert matrices["peak"][0].demand == 100
+    assert matrices["peak"][0].volume == 100

     # All true-like YAML values become "True" matrix
     # NOTE: When multiple YAML keys collapse to the same boolean value,
     # only the last one wins (standard YAML/dict behavior)
-    true_demands = {d.demand for d in matrices["True"]}
+    true_demands = {d.volume for d in matrices["True"]}
     assert true_demands == {150}  # from 'on:', the last true-like key

     # All false-like YAML values become "False" matrix
-    false_demands = {d.demand for d in matrices["False"]}
+    false_demands = {d.volume for d in matrices["False"]}
     assert false_demands == {125}  # from 'off:', the last false-like key
@@ -144,26 +144,26 @@ def test_quoted_boolean_keys_remain_strings():
     yml = textwrap.dedent("""
         network:
           name: test
-        traffic_matrix_set:
+        demands:
          "true":
             - source: "^A$"
-              sink: "^B$"
-              demand: 100
+              target: "^B$"
+              volume: 100
          "false":
             - source: "^C$"
-              sink: "^D$"
-              demand: 200
+              target: "^D$"
+              volume: 200
          "off":
             - source: "^E$"
-              sink: "^F$"
-              demand: 300
+              target: "^F$"
+              volume: 300
     """)

     scenario = Scenario.from_yaml(yml)
-    matrices = scenario.traffic_matrix_set.matrices
+    matrices = scenario.demand_set.sets

     # Quoted keys should remain as strings, not be converted to booleans
     assert set(matrices.keys()) == {"true", "false", "off"}
-    assert matrices["true"][0].demand == 100
-    assert matrices["false"][0].demand == 200
-    assert matrices["off"][0].demand == 300
+    assert matrices["true"][0].volume == 100
+    assert matrices["false"][0].volume == 200
+    assert matrices["off"][0].volume == 300
diff --git a/tests/workflow/test_capacity_envelope_analysis.py b/tests/workflow/test_capacity_envelope_analysis.py
index f252b99..34ad923 100644
--- a/tests/workflow/test_capacity_envelope_analysis.py
+++ b/tests/workflow/test_capacity_envelope_analysis.py
@@ -29,8 +29,8 @@ def simple_network() -> Network:
 def simple_failure_policy() -> FailurePolicy:
     """Create a simple failure policy that fails one link."""
     rule = FailureRule(
-        entity_scope="link",
-        rule_type="choice",
+        scope="link",
+        mode="choice",
         count=1,
     )
     return FailurePolicy(
@@ -62,10 +62,10 @@ class TestMaxFlowStep:

     def test_initialization_defaults(self):
         """Test MaxFlow initialization with defaults."""
-        step = MaxFlow(source="^A", sink="^C")
+        step = MaxFlow(source="^A", target="^C")

         assert step.source == "^A"
-        assert step.sink == "^C"
+        assert step.target == "^C"
         assert step.mode == "combine"
         assert step.failure_policy is None
         assert step.iterations == 1
@@ -80,7 +80,7 @@ def test_initialization_custom_values(self):
         """Test MaxFlow initialization with custom values."""
         step = MaxFlow(
             source="^src",
-            sink="^dst",
+            target="^dst",
             mode="pairwise",
             failure_policy="test_policy",
             iterations=100,
@@ -93,7 +93,7 @@ def test_initialization_custom_values(self):
         )

         assert step.source == "^src"
-        assert step.sink == "^dst"
+        assert step.target == "^dst"
         assert step.mode == "pairwise"
         assert step.failure_policy == "test_policy"
         assert step.iterations == 100
@@ -107,18 +107,18 @@ def test_validation_errors(self):
         """Test parameter validation."""
         with pytest.raises(ValueError, match="iterations must be >= 0"):
-            MaxFlow(source="^A", sink="^C", iterations=-1)
+            MaxFlow(source="^A", target="^C", iterations=-1)

         with pytest.raises(ValueError, match="parallelism must be >= 1"):
-            MaxFlow(source="^A", sink="^C", parallelism=0)
+            MaxFlow(source="^A", target="^C", parallelism=0)

         with pytest.raises(ValueError, match="mode must be 'combine' or 'pairwise'"):
-            MaxFlow(source="^A", sink="^C", mode="invalid")
+            MaxFlow(source="^A", target="^C", mode="invalid")

     def test_flow_placement_enum_usage(self):
         """Test that FlowPlacement enum is used correctly."""
         step = MaxFlow(
-            source="^A", sink="^C", flow_placement=FlowPlacement.PROPORTIONAL
+            source="^A", target="^C", flow_placement=FlowPlacement.PROPORTIONAL
         )
         assert step.flow_placement == FlowPlacement.PROPORTIONAL
@@ -166,7 +166,7 @@ def test_run_with_mock_failure_manager(
         # Create and run the step
         step = MaxFlow(
             source="^A",
-            sink="^C",
+            target="^C",
             failure_policy="test_policy",
             iterations=1,
             parallelism=1,
@@ -184,7 +184,7 @@
         # Verify convenience method was called with correct parameters
         _, kwargs = mock_failure_manager.run_max_flow_monte_carlo.call_args
         assert kwargs["source"] == "^A"
-        assert kwargs["sink"] == "^C"
+        assert kwargs["target"] == "^C"
         assert kwargs["mode"] == "combine"
         assert kwargs["iterations"] == 1
         assert kwargs["parallelism"] == 1
@@ -243,7 +243,7 @@ def test_run_with_failure_patterns(self, mock_failure_manager_class, mock_scenar
         # Create and run the step with failure pattern storage
         step = MaxFlow(
             source="^A",
-            sink="^C",
+            target="^C",
             iterations=2,
             store_failure_patterns=True,
             parallelism=1,
@@ -259,7 +259,7 @@ def test_capacity_envelope_with_failures_mocked(self):
         """Test capacity envelope step with mocked FailureManager."""
         step = MaxFlow(
             source="^A",
-            sink="^C",
+            target="^C",
             mode="combine",
             iterations=2,
             parallelism=1,
@@ -369,7 +369,7 @@ def test_include_flow_summary_functionality(
         # Test with include_flow_details=True
         step = MaxFlow(
             source="^A",
-            sink="^C",
+            target="^C",
             iterations=1,
             include_flow_details=True,
             parallelism=1,
@@ -399,8 +399,8 @@ def test_failure_trace_persisted_on_results(
             "selections": [
                 {
                     "rule_index": 0,
-                    "entity_scope": "link",
-                    "rule_type": "choice",
+                    "scope": "link",
+                    "mode": "choice",
                     "matched_count": 3,
                     "selected_ids": ["link1"],
                 }
             ]
@@ -450,7 +450,7 @@ def test_failure_trace_persisted_on_results(
         step = MaxFlow(
             name="test_step",
             source="^A",
-            sink="^C",
+            target="^C",
             iterations=2,
             store_failure_patterns=True,
             parallelism=1,
@@ -506,7 +506,7 @@ def test_no_failure_trace_when_disabled(
         step = MaxFlow(
             name="test_step_disabled",
             source="^A",
-            sink="^C",
+            target="^C",
             iterations=1,
             store_failure_patterns=False,
             parallelism=1,
diff --git a/tests/workflow/test_maximum_supported_demand.py b/tests/workflow/test_maximum_supported_demand.py
index 4cd489f..b237606 100644
--- a/tests/workflow/test_maximum_supported_demand.py
+++ b/tests/workflow/test_maximum_supported_demand.py
@@ -12,12 +12,12 @@ def _mock_scenario_with_matrix() -> MagicMock:
     mock_scenario = MagicMock()
     td = MagicMock()
     td.source = "A"
-    td.sink = "B"
-    td.demand = 10.0
+    td.target = "B"
+    td.volume = 10.0
     td.mode = "pairwise"
     td.priority = 0
-    td.flow_policy_config = None
-    mock_scenario.traffic_matrix_set.get_matrix.return_value = [td]
+    td.flow_policy = None
+    mock_scenario.demand_set.get_set.return_value = [td]
     return mock_scenario
@@ -43,7 +43,7 @@ def _eval(cache, alpha, seeds):
     scenario = _mock_scenario_with_matrix()
     step = MaximumSupportedDemand(
         name="msd_step",
-        matrix_name="default",
+        demand_set="default",
         alpha_start=1.0,
         growth_factor=2.0,
         resolution=0.01,
@@ -80,7 +80,7 @@ def _eval(cache, alpha, seeds):
     scenario = _mock_scenario_with_matrix()
     step = MaximumSupportedDemand(
         name="msd_step",
-        matrix_name="default",
+        demand_set="default",
         alpha_start=1.0,
         growth_factor=2.0,
         resolution=0.01,
@@ -105,13 +105,13 @@ def test_msd_end_to_end_single_link() -> None:
         ScenarioDataBuilder()
         .with_simple_nodes(["A", "B"])
         .with_simple_links([("A", "B", 10.0)])
-        .with_traffic_demand("A", "B", 5.0, matrix_name="default")
+        .with_traffic_demand("A", "B", 5.0, demand_set="default")
         .build_scenario()
     )

     step = MaximumSupportedDemand(
         name="msd_e2e",
-        matrix_name="default",
+        demand_set="default",
         alpha_start=1.0,
         growth_factor=2.0,
         resolution=0.01,
@@ -136,11 +136,11 @@ def test_msd_end_to_end_single_link() -> None:
         {
             "id": d.id,
             "source": d.source,
-            "sink": d.sink,
-            "demand": d.demand,
+            "target": d.target,
+            "volume": d.volume,
             "mode": d.mode,
             "priority": d.priority,
-            "flow_policy_config": d.flow_policy_config,
+            "flow_policy": d.flow_policy,
         }
         for d in scaled_demands
     ]
@@ -163,11 +163,11 @@ def test_msd_end_to_end_single_link() -> None:
         {
             "id": d.id,
             "source": d.source,
-            "sink": d.sink,
-            "demand": d.demand,
+            "target": d.target,
+            "volume": d.volume,
             "mode": d.mode,
             "priority": d.priority,
-            "flow_policy_config": d.flow_policy_config,
+            "flow_policy": d.flow_policy,
         }
         for d in scaled_demands_above
     ]
@@ -196,13 +196,13 @@ def test_msd_auto_vs_one_equivalence_single_link() -> None:
         ScenarioDataBuilder()
         .with_simple_nodes(["A", "B"])
         .with_simple_links([("A", "B", 10.0)])
-        .with_traffic_demand("A", "B", 5.0, matrix_name="default")
+        .with_traffic_demand("A", "B", 5.0, demand_set="default")
         .build_scenario()
     )

     step_auto = MSD(
         name="msd_auto",
-        matrix_name="default",
+        demand_set="default",
         alpha_start=1.0,
         growth_factor=2.0,
         resolution=0.01,
@@ -211,7 +211,7 @@
     )
     step_one = MSD(
         name="msd_one",
-        matrix_name="default",
+        demand_set="default",
         alpha_start=1.0,
         growth_factor=2.0,
         resolution=0.01,
diff --git a/tests/workflow/test_msd_perf_safety.py b/tests/workflow/test_msd_perf_safety.py
index 33aefa4..ea5be38 100644
--- a/tests/workflow/test_msd_perf_safety.py
+++ b/tests/workflow/test_msd_perf_safety.py
@@ -6,16 +6,16 @@


 class _ScenarioStub:
-    def __init__(self, network: Any, tmset: Any, results: Any) -> None:
+    def __init__(self, network: Any, demand_set: Any, results: Any) -> None:
         self.network = network
-        self.traffic_matrix_set = tmset
+        self.demand_set = demand_set
         self.results = results
         self._execution_counter = 0


 def test_msd_reuse_tm_across_seeds_is_behaviorally_identical(monkeypatch):
     # Build a tiny scenario
-    from ngraph.model.demand.matrix import TrafficMatrixSet
+    from ngraph.model.demand.matrix import DemandSet
     from ngraph.model.demand.spec import TrafficDemand
     from ngraph.model.network import Link, Network, Node
@@ -25,19 +25,19 @@ def test_msd_reuse_tm_across_seeds_is_behaviorally_identical(monkeypatch):
     net.add_link(Link("A", "B", capacity=5, cost=1))
     net.add_link(Link("B", "C", capacity=5, cost=1))

-    tmset = TrafficMatrixSet()
-    tmset.add(
+    demand_set = DemandSet()
+    demand_set.add(
         "default",
-        [TrafficDemand(source="A", sink="C", demand=2.0, mode="pairwise")],
+        [TrafficDemand(source="A", target="C", volume=2.0, mode="pairwise")],
     )

     from ngraph.results.store import Results

-    scenario = _ScenarioStub(net, tmset, Results())
+    scenario = _ScenarioStub(net, demand_set, Results())

     # Run MSD with seeds=2; this exercises repeated evaluation within one TM build
     msd = MaximumSupportedDemand(
-        matrix_name="default",
+        demand_set="default",
         seeds_per_alpha=2,
         alpha_start=1.0,
         growth_factor=2.0,
@@ -49,7 +49,7 @@
     msd.name = "msd"
     msd.execute(scenario)

-    # Expect alpha_star >= 1 because demand=2 fits capacity 5
+    # Expect alpha_star >= 1 because volume=2 fits capacity 5
     exported = scenario.results.to_dict()
     alpha_star = exported["steps"]["msd"]["data"].get("alpha_star")
     assert alpha_star is not None
diff --git a/tests/workflow/test_namespace_alignment.py b/tests/workflow/test_namespace_alignment.py
index 9a54039..a1c5613 100644
--- a/tests/workflow/test_namespace_alignment.py
+++ b/tests/workflow/test_namespace_alignment.py
@@ -14,7 +14,7 @@ def test_metadata_aligns_with_results_for_empty_name() -> None:
         B: {}

     workflow:
-      - step_type: BuildGraph
+      - type: BuildGraph
     """

     scenario = Scenario.from_yaml(yaml_content)
diff --git a/tests/workflow/test_network_stats.py b/tests/workflow/test_network_stats.py
index 6cea7ba..d325a4f 100644
--- a/tests/workflow/test_network_stats.py
+++ b/tests/workflow/test_network_stats.py
@@ -140,7 +140,3 @@ def test_network_stats_with_exclusions(mock_scenario):
     # Should exclude node A and its links
     assert data["node_count"] == 2  # B, C (excluding A)
     assert data["link_count"] == 0  # All links connect to A, so none remain
-
-
-# (Removed backward-compatibility param duplication; covered by explicit
-# include_disabled default behavior in other tests.)
diff --git a/tests/workflow/test_tm_analysis_perf_safety.py b/tests/workflow/test_tm_analysis_perf_safety.py
index af88589..9ab6960 100644
--- a/tests/workflow/test_tm_analysis_perf_safety.py
+++ b/tests/workflow/test_tm_analysis_perf_safety.py
@@ -9,18 +9,18 @@


 class _ScenarioStub:
     def __init__(
-        self, network: Any, tmset: Any, results: Any, failure_policy_set: Any
+        self, network: Any, demand_set: Any, results: Any, failures: Any
     ) -> None:
         self.network = network
-        self.traffic_matrix_set = tmset
+        self.demand_set = demand_set
         self.results = results
-        self.failure_policy_set = failure_policy_set
+        self.failure_policy_set = failures
         self._execution_counter = 0


 def test_tm_basic_behavior_unchanged(monkeypatch):
     # Small sanity test that the step runs end-to-end and stores outputs
-    from ngraph.model.demand.matrix import TrafficMatrixSet
+    from ngraph.model.demand.matrix import DemandSet
     from ngraph.model.demand.spec import TrafficDemand
     from ngraph.model.network import Link, Network, Node
@@ -30,10 +30,10 @@ def test_tm_basic_behavior_unchanged(monkeypatch):
     net.add_link(Link("A", "B", capacity=5, cost=1))
     net.add_link(Link("B", "C", capacity=5, cost=1))

-    tmset = TrafficMatrixSet()
-    tmset.add(
+    demand_set = DemandSet()
+    demand_set.add(
         "default",
-        [TrafficDemand(source="A", sink="C", demand=2.0, mode="pairwise")],
+        [TrafficDemand(source="A", target="C", volume=2.0, mode="pairwise")],
     )

     class _ResultsStore:
@@ -60,10 +60,10 @@ def get_all_step_metadata(self):
     class _FailurePolicySetStub:
         pass

-    scenario = _ScenarioStub(net, tmset, _ResultsStore(), _FailurePolicySetStub())
+    scenario = _ScenarioStub(net, demand_set, _ResultsStore(), _FailurePolicySetStub())

     step = TrafficMatrixPlacement(
-        matrix_name="default",
+        demand_set="default",
         iterations=2,
         placement_rounds="auto",
         include_flow_details=False,
diff --git a/tests/workflow/test_traffic_matrix_placement.py b/tests/workflow/test_traffic_matrix_placement.py
index a16802a..2b756c4 100644
--- a/tests/workflow/test_traffic_matrix_placement.py
+++ b/tests/workflow/test_traffic_matrix_placement.py
@@ -18,11 +18,11 @@ def test_traffic_matrix_placement_stores_core_outputs(
     mock_scenario = MagicMock()
     mock_td = MagicMock()
     mock_td.source = "A"
-    mock_td.sink = "B"
-    mock_td.demand = 10.0
+    mock_td.target = "B"
+    mock_td.volume = 10.0
     mock_td.mode = "pairwise"
     mock_td.priority = 0
-    mock_scenario.traffic_matrix_set.get_matrix.return_value = [mock_td]
+    mock_scenario.demand_set.get_set.return_value = [mock_td]

     # Mock FailureManager return value: baseline separate, failure iterations in results
     mock_raw = {
@@ -87,7 +87,7 @@ def test_traffic_matrix_placement_stores_core_outputs(
     step = TrafficMatrixPlacement(
         name="tm_step",
-        matrix_name="default",
+        demand_set="default",
         iterations=2,
     )
     mock_scenario.results = Results()
@@ -111,11 +111,11 @@ def test_traffic_matrix_placement_flow_details_edges(
     mock_scenario = MagicMock()
     mock_td = MagicMock()
     mock_td.source = "A"
-    mock_td.sink = "B"
-    mock_td.demand = 10.0
+    mock_td.target = "B"
+    mock_td.volume = 10.0
     mock_td.mode = "pairwise"
     mock_td.priority = 0
-    mock_scenario.traffic_matrix_set.get_matrix.return_value = [mock_td]
+    mock_scenario.demand_set.get_set.return_value = [mock_td]

     # Mock FailureManager return value with edges used (baseline separate)
     mock_raw = {
@@ -141,7 +141,7 @@
                             "source": "A",
                             "destination": "B",
                             "priority": 0,
-                            "demand": 10.0,
+                            "volume": 10.0,
                             "placed": 8.0,
                             "dropped": 2.0,
                             "cost_distribution": {},
@@ -165,7 +165,7 @@
                             "source": "A",
                             "destination": "B",
                             "priority": 0,
-                            "demand": 10.0,
+                            "volume": 10.0,
                             "placed": 10.0,
                             "dropped": 0.0,
                             "cost_distribution": {},
@@ -190,7 +190,7 @@
     step = TrafficMatrixPlacement(
         name="tm_step",
-        matrix_name="default",
+        demand_set="default",
         iterations=2,
         include_flow_details=True,
         include_used_edges=True,
@@ -214,11 +214,11 @@ def test_traffic_matrix_placement_alpha_scales_demands(
     mock_scenario = MagicMock()
     mock_td = MagicMock()
     mock_td.source = "S"
-    mock_td.sink = "T"
-    mock_td.demand = 10.0
+    mock_td.target = "T"
+    mock_td.volume = 10.0
     mock_td.mode = "pairwise"
     mock_td.priority = 0
-    mock_scenario.traffic_matrix_set.get_matrix.return_value = [mock_td]
+    mock_scenario.demand_set.get_set.return_value = [mock_td]

     # Mock FailureManager return value (minimal valid structure)
     mock_raw = {
@@ -241,7 +241,7 @@
     # Run with alpha scaling
     step = TrafficMatrixPlacement(
         name="tm_step_alpha",
-        matrix_name="default",
+        demand_set="default",
         iterations=1,
         alpha=2.5,
     )
@@ -254,8 +254,8 @@
     dcfg = kwargs.get("demands_config")
     assert isinstance(dcfg, list) and len(dcfg) == 1
     assert dcfg[0]["source"] == "S"
-    assert dcfg[0]["sink"] == "T"
-    assert abs(float(dcfg[0]["demand"]) - 25.0) < 1e-12
+    assert dcfg[0]["target"] == "T"
+    assert abs(float(dcfg[0]["volume"]) - 25.0) < 1e-12


 @patch("ngraph.workflow.traffic_matrix_placement_step.FailureManager")
@@ -265,11 +265,11 @@ def test_traffic_matrix_placement_metadata_includes_alpha(
     mock_scenario = MagicMock()
     mock_td = MagicMock()
     mock_td.source = "A"
-    mock_td.sink = "B"
-    mock_td.demand = 1.0
+    mock_td.target = "B"
+    mock_td.volume = 1.0
     mock_td.mode = "pairwise"
     mock_td.priority = 0
-    mock_scenario.traffic_matrix_set.get_matrix.return_value = [mock_td]
+    mock_scenario.demand_set.get_set.return_value = [mock_td]

     mock_raw = {
         "results": [
@@ -290,7 +290,7 @@
     step = TrafficMatrixPlacement(
         name="tm_step_meta",
-        matrix_name="default",
+        demand_set="default",
         iterations=1,
         alpha=3.0,
     )
@@ -311,12 +311,12 @@ def test_traffic_matrix_placement_alpha_auto_uses_msd(
     mock_scenario = MagicMock()
     td = MagicMock()
     td.source = "S"
-    td.sink = "T"
-    td.demand = 4.0
+    td.target = "T"
+    td.volume = 4.0
     td.mode = "pairwise"
     td.priority = 0
-    td.flow_policy_config = None
-    mock_scenario.traffic_matrix_set.get_matrix.return_value = [td]
+    td.flow_policy = None
+    mock_scenario.demand_set.get_set.return_value = [td]

     # Populate results metadata: prior MSD step
     # Provide MSD step data in Results store
@@ -331,11 +331,11 @@
                 "base_demands": [
                     {
                         "source": "S",
-                        "sink": "T",
-                        "demand": 4.0,
+                        "target": "T",
+                        "volume": 4.0,
                         "mode": "pairwise",
                         "priority": 0,
-                        "flow_policy_config": None,
+                        "flow_policy": None,
                     }
                 ],
             },
@@ -362,7 +362,7 @@
     step = TrafficMatrixPlacement(
         name="tm_auto",
-        matrix_name="default",
+        demand_set="default",
         iterations=1,
         alpha_from_step="msd1",
         alpha_from_field="data.alpha_star",
@@ -373,7 +373,7 @@
     _, kwargs = mock_failure_manager.run_demand_placement_monte_carlo.call_args
     dcfg = kwargs.get("demands_config")
     assert isinstance(dcfg, list) and len(dcfg) == 1
-    assert abs(float(dcfg[0]["demand"]) - 8.0) < 1e-12
+    assert abs(float(dcfg[0]["volume"]) - 8.0) < 1e-12


 @patch("ngraph.workflow.traffic_matrix_placement_step.FailureManager")
@@ -383,19 +383,19 @@ def test_traffic_matrix_placement_alpha_auto_missing_msd_raises(
     mock_scenario = MagicMock()
     td = MagicMock()
     td.source = "S"
-    td.sink = "T"
-    td.demand = 4.0
+    td.target = "T"
+    td.volume = 4.0
     td.mode = "pairwise"
     td.priority = 0
-    td.flow_policy_config = None
-    mock_scenario.traffic_matrix_set.get_matrix.return_value = [td]
+    td.flow_policy = None
+    mock_scenario.demand_set.get_set.return_value = [td]

     # No MSD metadata
     mock_scenario.results.get_all_step_metadata.return_value = {}

     step = TrafficMatrixPlacement(
         name="tm_auto",
-        matrix_name="default",
+        demand_set="default",
         iterations=1,
         alpha_from_step="msd1",
         alpha_from_field="data.alpha_star",
@@ -413,11 +413,11 @@ def test_traffic_matrix_placement_failure_trace_on_results(
     mock_scenario = MagicMock()
     mock_td = MagicMock()
     mock_td.source = "A"
-    mock_td.sink = "B"
-    mock_td.demand = 10.0
+    mock_td.target = "B"
+    mock_td.volume = 10.0
     mock_td.mode = "pairwise"
     mock_td.priority = 0
-    mock_scenario.traffic_matrix_set.get_matrix.return_value = [mock_td]
+    mock_scenario.demand_set.get_set.return_value = [mock_td]

     # Create mock result with failure_trace and occurrence_count
     mock_result = MagicMock()
@@ -429,8 +429,8 @@
         "selections": [
             {
                 "rule_index": 0,
-                "entity_scope": "link",
-                "rule_type": "choice",
+                "scope": "link",
+                "mode": "choice",
                 "matched_count": 5,
                 "selected_ids": ["L1"],
             }
         ]
@@ -483,7 +483,7 @@
     step = TrafficMatrixPlacement(
         name="tm_patterns",
-        matrix_name="default",
+        demand_set="default",
         iterations=2,
         store_failure_patterns=True,
     )
@@ -513,11 +513,11 @@ def test_traffic_matrix_placement_no_trace_when_disabled(
     mock_scenario = MagicMock()
     mock_td = MagicMock()
     mock_td.source = "A"
-    mock_td.sink = "B"
-    mock_td.demand = 10.0
+    mock_td.target = "B"
+    mock_td.volume = 10.0
     mock_td.mode = "pairwise"
     mock_td.priority = 0
-    mock_scenario.traffic_matrix_set.get_matrix.return_value = [mock_td]
+    mock_scenario.demand_set.get_set.return_value = [mock_td]

     mock_result = MagicMock()
     mock_result.failure_trace = None  # No trace when disabled
@@ -549,7 +549,7 @@
     step = TrafficMatrixPlacement(
         name="tm_no_patterns",
-        matrix_name="default",
+        demand_set="default",
         iterations=1,
         store_failure_patterns=False,
     )