diff --git a/.github/workflows/ci-tests.yml b/.github/workflows/ci-tests.yml new file mode 100644 index 0000000..5166c1f --- /dev/null +++ b/.github/workflows/ci-tests.yml @@ -0,0 +1,32 @@ +name: CI Tests + +on: [push, pull_request] + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v6 + + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: '3.10' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install . + + - name: Run tests with coverage + run: | + pytest --cov=iris_gpubench --cov-report=xml tests/ + + - name: Upload coverage report + uses: actions/upload-artifact@v4 + with: + name: coverage-report + path: coverage.xml + diff --git a/setup.py b/setup.py index 7a4b7d3..a9c55c9 100644 --- a/setup.py +++ b/setup.py @@ -16,16 +16,19 @@ packages=find_packages(), # Automatically find packages in the current directory install_requires=[ 'pynvml==11.5.3', - 'requests==2.32.4', - 'pyyaml==6.0.2', - 'tabulate==0.9.0', - 'matplotlib==3.7.5', - 'docker==7.1.0', + 'requests>=2.32.4', + 'pyyaml>=6.0.2', + 'tabulate>=0.9.0', + 'matplotlib>=3.7.5', + 'docker>=7.1.0', + 'pytest>=7.4.2', + 'requests-mock>=1.9.3', + 'pytest-cov>=4.1.0', ], entry_points={ 'console_scripts': [ 'iris-gpubench=iris_gpubench.main:main', # Ensures the script is accessible via `iris-gpubench` command ], }, - python_requires='==3.8.10', # Ensure compatibility with Python 3.8.10 + python_requires='>=3.8.10', # Ensure compatibility with Python 3.8.10 ) \ No newline at end of file diff --git a/tests/test_carbon_metrics.py b/tests/test_carbon_metrics.py new file mode 100644 index 0000000..3d30082 --- /dev/null +++ b/tests/test_carbon_metrics.py @@ -0,0 +1,170 @@ +import pytest +import requests +import requests_mock +from requests.exceptions import Timeout, ConnectionError as RequestsConnectionError + + +from iris_gpubench.carbon_metrics import ( + get_carbon_region_names, + get_carbon_forecast, + 
CARBON_INTENSITY_URL, +) +from iris_gpubench.utils.globals import DEFAULT_REGION, LOGGER, TIMEOUT_SECONDS + +# --- Mock Data for Successful API Calls --- + +# Successful response structure for the /regional endpoint +SUCCESS_REGIONAL_RESPONSE = { + "data": [ + { + "regions": [ + { + "shortname": "South England", + "intensity": {"forecast": 150, "index": "moderate"}, + }, + { + "shortname": "Scotland", + "intensity": {"forecast": 200, "index": "high"}, + }, + { + "shortname": "Wales", + "intensity": {"forecast": 50, "index": "low"}, + }, + ] + } + ] +} + +# Expected region names for get_carbon_region_names test +EXPECTED_REGION_NAMES = ['South England', 'Scotland', 'Wales'] + +def test_get_carbon_region_names_success(requests_mock): + """Test successful retrieval of region names.""" + requests_mock.get( + CARBON_INTENSITY_URL, + json=SUCCESS_REGIONAL_RESPONSE, + status_code=200 # OK + ) + result = get_carbon_region_names() + assert result == EXPECTED_REGION_NAMES + +def test_get_carbon_region_names_http_error(requests_mock): + """Test handling of HTTP errors (e.g., 404, 500).""" + requests_mock.get( + CARBON_INTENSITY_URL, + status_code=500 # Internal Server Error + ) + result = get_carbon_region_names() + assert result == [] + +def test_get_carbon_region_names_timeout(requests_mock, caplog): + """Test handling of request Timeout.""" + requests_mock.get( + CARBON_INTENSITY_URL, + exc=Timeout # Raise a Timeout exception + ) + result = get_carbon_region_names() + assert result == [] + assert f"Request timed out after {TIMEOUT_SECONDS} seconds." 
in caplog.text + +def test_get_carbon_region_names_connection_error(requests_mock, caplog): + """Test handling of ConnectionError.""" + requests_mock.get( + CARBON_INTENSITY_URL, + exc=RequestsConnectionError # Raise a ConnectionError + ) + result = get_carbon_region_names() + assert result == [] + assert "Network error occurred" in caplog.text + +def test_get_carbon_region_names_invalid_json(requests_mock, caplog): + """Test handling of invalid JSON response.""" + requests_mock.get( + CARBON_INTENSITY_URL, + text="This is not JSON", + status_code=200 + ) + result = get_carbon_region_names() + assert result == [] + assert "Failed to decode JSON response" in caplog.text + +# --- Tests for get_carbon_forecast --- + +def test_get_carbon_forecast_success(requests_mock): + """Test successful retrieval of carbon forecast for a specified region.""" + target_region = "Wales" + expected_forecast = 50.0 + requests_mock.get( + CARBON_INTENSITY_URL, + json=SUCCESS_REGIONAL_RESPONSE, + status_code=200 + ) + result = get_carbon_forecast(target_region) + assert result == expected_forecast + +def test_get_carbon_forecast_default_region(requests_mock, monkeypatch): + """Test successful retrieval using the default region.""" + # Temporarily set DEFAULT_REGION for the test + monkeypatch.setattr('iris_gpubench.utils.globals.DEFAULT_REGION', 'South England') + expected_forecast = 150.0 + requests_mock.get( + CARBON_INTENSITY_URL, + json=SUCCESS_REGIONAL_RESPONSE, + status_code=200 + ) + # Call without argument to use the mocked default + result = get_carbon_forecast() + assert result == expected_forecast + +def test_get_carbon_forecast_region_not_found(requests_mock, caplog): + """Test case where the requested region is not in the response.""" + missing_region = "London" + requests_mock.get( + CARBON_INTENSITY_URL, + json=SUCCESS_REGIONAL_RESPONSE, + status_code=200 + ) + result = get_carbon_forecast(missing_region) + assert result is None + assert f"Region '{missing_region}' not found 
in the response." in caplog.text + +def test_get_carbon_forecast_value_error_non_float(requests_mock, caplog): + """Test handling of a non-numeric 'forecast' value (raises ValueError on float() conversion).""" + bad_forecast_data = { + "data": [ + { + "regions": [ + { + "shortname": "Scotland", + "intensity": {"forecast": "not_a_number", "index": "moderate"}, + } + ] + } + ] + } + requests_mock.get( + CARBON_INTENSITY_URL, + json=bad_forecast_data, + status_code=200 + ) + result = get_carbon_forecast("Scotland") + assert result is None + assert "Failed to decode JSON response" in caplog.text + +def test_get_carbon_forecast_http_error(requests_mock): + """Test handling of HTTP errors (e.g., 401 Unauthorized).""" + requests_mock.get( + CARBON_INTENSITY_URL, + status_code=401 + ) + result = get_carbon_forecast("Scotland") + assert result is None + +def test_get_carbon_forecast_timeout(requests_mock): + """Test handling of request Timeout.""" + requests_mock.get( + CARBON_INTENSITY_URL, + exc=Timeout + ) + result = get_carbon_forecast("Scotland") + assert result is None \ No newline at end of file diff --git a/tests/test_cli.py b/tests/test_cli.py new file mode 100644 index 0000000..fe536ae --- /dev/null +++ b/tests/test_cli.py @@ -0,0 +1,171 @@ +import pytest +import sys +from unittest.mock import patch, MagicMock +import argparse +from argparse import Namespace +from iris_gpubench.utils.cli import parse_arguments # Assuming iris_gpubench.utils.cli.py is in the current directory or importable + +# Mock global constants and dependencies +MONITOR_INTERVAL = 5 # Defined in globals.py in the actual code +VALID_REGIONS = ["South England", "North Scotland", "Wales"] + +# --- Fixtures and Mocks --- + +@pytest.fixture +def mock_dependencies(): + """Fixture to mock LOGGER, image_exists, and list_available_images.""" + with ( + patch('iris_gpubench.utils.cli.LOGGER', new_callable=MagicMock) as mock_logger, + patch('iris_gpubench.utils.cli.image_exists', new_callable=MagicMock) 
as mock_image_exists, + patch('iris_gpubench.utils.cli.list_available_images', new_callable=MagicMock) as mock_list_images, + ): + yield mock_logger, mock_image_exists, mock_list_images + +@pytest.fixture +def mock_carbon_regions(): + """Fixture for the get_carbon_region_names_func.""" + return MagicMock(return_value=VALID_REGIONS) + +@pytest.fixture +def mock_parse_args_exit(): + """Fixture to mock parser.parse_args() and parser.error() to raise SystemExit.""" + # Patch sys.exit to raise SystemExit for all tests that expect an exit + with patch('sys.exit', side_effect=SystemExit) as mock_exit: + yield mock_exit + + +# --- Test Cases for Successful Parsing --- + +def test_parse_arguments_benchmark_image_success(mock_dependencies, mock_carbon_regions): + """Test successful parsing with --benchmark_image and default values.""" + mock_logger, mock_image_exists, mock_list_images = mock_dependencies + mock_image_exists.return_value = True + + test_args = ['iris_gpubench.utils.cli.py', '--benchmark_image', 'my/image'] + with patch('sys.argv', test_args): + args = parse_arguments(mock_carbon_regions) + + assert isinstance(args, Namespace) + assert args.benchmark_image == 'my/image' + assert args.benchmark_command is None + assert args.interval == MONITOR_INTERVAL + assert args.carbon_region == 'South England' + assert args.no_live_monitor is False + assert args.no_plot is False + assert args.live_plot is False + assert args.export_to_meerkat is False + assert args.monitor_logs is False + assert args.nvidia_nsights is False + mock_image_exists.assert_called_once_with('my/image') + + +def test_parse_arguments_benchmark_command_success(mock_dependencies, mock_carbon_regions): + """Test successful parsing with --benchmark_command and default values.""" + mock_logger, mock_image_exists, mock_list_images = mock_dependencies + mock_image_exists.return_value = True + + test_args = ['iris_gpubench.utils.cli.py', '--benchmark_command', 'echo hello'] + with patch('sys.argv', 
test_args): + args = parse_arguments(mock_carbon_regions) + + assert isinstance(args, Namespace) + assert args.benchmark_command == 'echo hello' + assert args.benchmark_image is None + assert args.carbon_region == 'South England' + mock_image_exists.assert_not_called() + + +def test_parse_arguments_all_options_success(mock_dependencies, mock_carbon_regions): + """Test successful parsing with all flags and custom values.""" + mock_logger, mock_image_exists, mock_list_images = mock_dependencies + mock_image_exists.return_value = True + + test_args = [ + 'iris_gpubench.utils.cli.py', + '--benchmark_image', 'other/image:latest', + '--interval', '10', + '--carbon_region', 'North Scotland', + '--no_live_monitor', + '--no_plot', + '--live_plot', + '--export_to_meerkat', + '--monitor_logs', + '--nvidia_nsights' + ] + with patch('sys.argv', test_args): + args = parse_arguments(mock_carbon_regions) + + assert args.benchmark_image == 'other/image:latest' + assert args.interval == 10 + assert args.carbon_region == 'North Scotland' + assert args.no_live_monitor is True + assert args.no_plot is True + assert args.live_plot is True # Note: live_plot and no_plot are not mutually exclusive in the CLI logic + assert args.export_to_meerkat is True + assert args.monitor_logs is True + assert args.nvidia_nsights is True + mock_carbon_regions.assert_called_once() + mock_image_exists.assert_called_once_with('other/image:latest') + + +# --- Test Cases for Argument Validation Failure --- + + +@patch.object(argparse.ArgumentParser, 'error', side_effect=SystemExit) +def test_parse_arguments_both_benchmark_fails(mock_error, mock_dependencies, mock_carbon_regions): + """Test validation fails when both --benchmark_image and --benchmark_command are provided.""" + test_args = ['iris_gpubench.utils.cli.py', '--benchmark_image', 'img', '--benchmark_command', 'cmd'] + with patch('sys.argv', test_args): + with pytest.raises(SystemExit): + parse_arguments(mock_carbon_regions) + + 
mock_error.assert_called_once() + mock_dependencies[0].error.assert_called_once() # Check LOGGER was called + + +@pytest.mark.parametrize("invalid_interval", [0, -5]) +def test_parse_arguments_invalid_interval_fails(mock_dependencies, mock_carbon_regions, mock_parse_args_exit, invalid_interval): + """Test validation fails when --interval is not a positive integer.""" + test_args = ['iris_gpubench.utils.cli.py', '--benchmark_command', 'cmd', '--interval', str(invalid_interval)] + + with patch('sys.argv', test_args): + with pytest.raises(SystemExit) as excinfo: + parse_arguments(mock_carbon_regions) + + assert excinfo.type is SystemExit + mock_dependencies[0].error.assert_called_once() # Check LOGGER was called + mock_parse_args_exit.assert_called_once_with(1) + + +def test_parse_arguments_invalid_carbon_region_fails(mock_dependencies, mock_carbon_regions, mock_parse_args_exit): + """Test validation fails when --carbon_region is invalid.""" + mock_carbon_regions.return_value = VALID_REGIONS + invalid_region = "Mars" + test_args = ['iris_gpubench.utils.cli.py', '--benchmark_command', 'cmd', '--carbon_region', invalid_region] + + with patch('sys.argv', test_args): + with pytest.raises(SystemExit) as excinfo: + parse_arguments(mock_carbon_regions) + + assert excinfo.type is SystemExit + mock_dependencies[0].error.assert_called_once() # Check LOGGER was called + mock_parse_args_exit.assert_called_once_with(1) + + +def test_parse_arguments_non_existent_image_fails(mock_dependencies, mock_carbon_regions, mock_parse_args_exit): + """Test validation fails when --benchmark_image is provided but the image does not exist.""" + mock_logger, mock_image_exists, mock_list_images = mock_dependencies + mock_image_exists.return_value = False + mock_list_images.return_value = ["img1", "img2"] + test_image = "nonexistent:latest" + test_args = ['iris_gpubench.utils.cli.py', '--benchmark_image', test_image] + + with patch('sys.argv', test_args): + with pytest.raises(SystemExit) as 
excinfo: + parse_arguments(mock_carbon_regions) + + assert excinfo.type is SystemExit + mock_image_exists.assert_called_once_with(test_image) + mock_list_images.assert_called_once() + mock_logger.error.assert_called_once_with("Image '%s' does not exist.", test_image) + mock_parse_args_exit.assert_called_once_with(1) \ No newline at end of file