diff --git a/tests/test_api_request_handler_case_matcher.py b/tests/test_api_request_handler_case_matcher.py new file mode 100644 index 0000000..6d4bb2f --- /dev/null +++ b/tests/test_api_request_handler_case_matcher.py @@ -0,0 +1,555 @@ +""" +Unit tests for NAME matcher optimization that skips fetching all cases. + +Tests the performance optimization introduced to avoid downloading 165k+ cases +when using NAME or PROPERTY matcher, which only need to validate specific case IDs. +""" + +import pytest +from unittest.mock import patch, MagicMock, call +from pathlib import Path +import json +from serde.json import from_json + +from tests.helpers.api_client_helpers import TEST_RAIL_URL, create_url +from trcli.cli import Environment +from trcli.api.api_request_handler import ApiRequestHandler +from trcli.api.api_client import APIClient, APIClientResult +from trcli.data_classes.dataclass_testrail import TestRailSuite, TestRailSection, TestRailCase, TestRailResult +from trcli.data_classes.data_parsers import MatchersParser + + +@pytest.fixture +def environment(): + """Create test environment""" + env = Environment() + env.project = "Test Project" + env.batch_size = 10 + return env + + +@pytest.fixture +def api_client(): + """Create test API client""" + return APIClient(host_name=TEST_RAIL_URL) + + +def create_test_suite_with_case_ids(num_cases=10): + """Helper to create test suite with specified number of cases with case IDs""" + test_cases = [] + for i in range(1, num_cases + 1): + test_case = TestRailCase( + case_id=i, + title=f"Test case {i}", + section_id=1, + result=TestRailResult(case_id=i, comment=f"Test result {i}", elapsed="1s", status_id=1), + ) + test_cases.append(test_case) + + section = TestRailSection(name="Test Section", section_id=1, suite_id=1, testcases=test_cases) + + return TestRailSuite(name="Test Suite", suite_id=1, testsections=[section]) + + +def create_test_suite_with_missing_case_ids(total_cases=10, missing_count=3): + """Helper to create test suite with some cases missing IDs""" + test_cases = [] + for i in range(1, total_cases + 1): + # First `missing_count` cases don't have case_id + case_id = None if i <= missing_count else i + test_case = TestRailCase( + case_id=case_id, + title=f"Test case {i}", + section_id=1, + result=TestRailResult(case_id=case_id, comment=f"Test result {i}", elapsed="1s", status_id=1), + ) + test_cases.append(test_case) + + section = TestRailSection(name="Test Section", section_id=1, suite_id=1, testcases=test_cases) + + return TestRailSuite(name="Test Suite", suite_id=1, testsections=[section]) + + +class TestNameMatcherOptimization: + """Test suite for NAME matcher performance optimizations""" + + @pytest.mark.api_handler + def test_name_matcher_skips_bulk_case_fetch(self, environment, api_client, mocker): + """ + Test that NAME matcher does NOT fetch all cases from TestRail. + This is the key optimization - we should skip the expensive get_all_cases call. 
+ """ + # Setup: NAME matcher with 100 test cases + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_case_ids(num_cases=100) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock the get_all_cases method to track if it's called + mock_get_all_cases = mocker.patch.object( + api_request_handler, "_ApiRequestHandler__get_all_cases", return_value=([], None) + ) + + # Mock validation to return all IDs as valid (skip actual validation) + mocker.patch.object( + api_request_handler, "_ApiRequestHandler__validate_case_ids_exist", return_value=set(range(1, 101)) + ) + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: get_all_cases should NOT have been called for NAME matcher + mock_get_all_cases.assert_not_called() + assert not missing_ids, "Should not have missing IDs" + assert error == "", "Should not have errors" + + @pytest.mark.api_handler + def test_auto_matcher_still_fetches_all_cases(self, environment, api_client, mocker): + """ + Test that AUTO matcher STILL fetches all cases (required for automation ID lookup). + This ensures we didn't break the AUTO matcher functionality. + """ + # Setup: AUTO matcher + environment.case_matcher = MatchersParser.AUTO + test_suite = create_test_suite_with_case_ids(num_cases=10) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock get_all_cases to return some cases + mock_cases = [ + {"id": i, "custom_automation_id": f"test{i}", "title": f"Test {i}", "section_id": 1} for i in range(1, 11) + ] + mock_get_all_cases = mocker.patch.object( + api_request_handler, "_ApiRequestHandler__get_all_cases", return_value=(mock_cases, None) + ) + + mocker.patch.object(api_request_handler.data_provider, "update_data") + + # Execute + project_id = 1 + api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: get_all_cases SHOULD be called for AUTO matcher + mock_get_all_cases.assert_called_once_with(project_id, 1) + + @pytest.mark.api_handler + def test_name_matcher_skips_validation_for_large_batches(self, environment, api_client, mocker): + """ + Test that validation is SKIPPED when: + - Using NAME matcher + - All tests have case IDs (no missing) + - More than 1000 case IDs (large batch) + """ + # Setup: NAME matcher with 2000 test cases (> 1000 threshold) + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_case_ids(num_cases=2000) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock validation method to track if it's called + mock_validate = mocker.patch.object( + api_request_handler, "_ApiRequestHandler__validate_case_ids_exist", return_value=set(range(1, 2001)) + ) + + mock_log = mocker.patch.object(environment, "log") + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: Validation should be SKIPPED for large batches + mock_validate.assert_not_called() + + # Should log that validation was skipped + skip_log_calls = [call for call in mock_log.call_args_list if "Skipping validation" in str(call)] + assert len(skip_log_calls) > 0, "Should log that validation was skipped" + + assert not missing_ids, "Should not have missing IDs" + assert error == "", "Should not have errors" + + @pytest.mark.api_handler + def test_name_matcher_validates_small_batches(self, environment, api_client, mocker): + """ + Test that validation RUNS when: + 
- Using NAME matcher + - Less than 1000 case IDs (small batch) + """ + # Setup: NAME matcher with 500 test cases (< 1000 threshold) + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_case_ids(num_cases=500) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock validation method to track if it's called + mock_validate = mocker.patch.object( + api_request_handler, "_ApiRequestHandler__validate_case_ids_exist", return_value=set(range(1, 501)) + ) + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: Validation SHOULD run for small batches + mock_validate.assert_called_once() + assert not missing_ids, "Should not have missing IDs" + assert error == "", "Should not have errors" + + @pytest.mark.api_handler + def test_name_matcher_fetches_all_cases_for_large_report_with_missing_ids(self, environment, api_client, mocker): + """ + Test that for large reports with missing IDs, we FETCH ALL CASES instead of individual validation. + This is the new optimized behavior: + - Using NAME matcher + - Large report (>=1000 total cases) + - Some tests are missing case IDs + + Strategy: Fetch all cases once (e.g., 660 calls for 165k cases) is more efficient than + individual validation (e.g., 1500 calls for 1500 cases in report). + """ + # Setup: 1500 total cases, 3 missing IDs (total >= 1000 threshold) + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_missing_case_ids(total_cases=1500, missing_count=3) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock get_all_cases to return all case IDs 4-1500 (cases 1-3 don't exist, matching missing IDs) + mock_get_all_cases = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__get_all_cases", + return_value=([{"id": i} for i in range(4, 1501)], None), + ) + + # Mock individual validation - should NOT be called for large reports + mock_validate = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__validate_case_ids_exist", + return_value=set(range(4, 1501)), + ) + + mock_log = mocker.patch.object(environment, "log") + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: Should FETCH ALL CASES for large reports with missing IDs + mock_get_all_cases.assert_called_once_with(project_id, 1) + + # Should NOT use individual validation + mock_validate.assert_not_called() + + # Should log that it's using fetch-all strategy + fetch_log_calls = [call for call in mock_log.call_args_list if "Fetching all cases" in str(call)] + assert len(fetch_log_calls) > 0, "Should log that fetch-all strategy is being used" + + # Should log that missing cases were found + missing_log_calls = [call for call in mock_log.call_args_list if "without case ID" in str(call)] + assert len(missing_log_calls) > 0, "Should log missing case IDs" + + assert missing_ids, "Should have missing IDs" + assert error == "", "Should not have errors" + + @pytest.mark.api_handler + def test_name_matcher_validates_individually_for_small_report_with_missing_ids( + self, environment, api_client, mocker + ): + """ + Test that for small reports with missing IDs, we use INDIVIDUAL validation. + - Using NAME matcher + - Small report (<1000 total cases) + - Some tests are missing case IDs + + Strategy: Individual validation (e.g., 500 calls) is more efficient than + fetch all (e.g., 660 calls for 165k cases). 
+ """ + # Setup: 500 total cases, 10 missing IDs (total < 1000 threshold) + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_missing_case_ids(total_cases=500, missing_count=10) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock individual validation + mock_validate = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__validate_case_ids_exist", + return_value=set(range(11, 501)), # Exclude the 10 missing (1-10) + ) + + # Mock get_all_cases - should NOT be called for small reports + mock_get_all_cases = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__get_all_cases", + return_value=([], None), + ) + + mock_log = mocker.patch.object(environment, "log") + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: Should use INDIVIDUAL validation for small reports + mock_validate.assert_called_once() + + # Should NOT fetch all cases + mock_get_all_cases.assert_not_called() + + # Should log that missing cases were found + missing_log_calls = [call for call in mock_log.call_args_list if "without case ID" in str(call)] + assert len(missing_log_calls) > 0, "Should log missing case IDs" + + assert missing_ids, "Should have missing IDs" + assert error == "", "Should not have errors" + + @pytest.mark.api_handler + def test_name_matcher_detects_nonexistent_case_ids(self, environment, api_client, mocker): + """ + Test that NAME matcher correctly detects case IDs that don't exist in TestRail. + """ + # Setup: Test suite with case IDs 1-10 + environment.case_matcher = MatchersParser.NAME + test_suite = create_test_suite_with_case_ids(num_cases=10) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock validation: Only IDs 1-5 exist, 6-10 don't exist + mock_validate = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__validate_case_ids_exist", + return_value=set(range(1, 6)), # Only 1-5 exist + ) + + mock_elog = mocker.patch.object(environment, "elog") + + # Execute + project_id = 1 + missing_ids, error = api_request_handler.check_missing_test_cases_ids(project_id) + + # Assert: Should detect nonexistent IDs + mock_validate.assert_called_once() + mock_elog.assert_called_once() + + # Check error message contains nonexistent IDs + error_call = mock_elog.call_args[0][0] + assert "Nonexistent case IDs" in error_call + assert "6" in error_call or "7" in error_call # At least some of the missing IDs + + assert not missing_ids, "missing_ids refers to tests without IDs in report" + assert error != "", "Should have error about nonexistent IDs" + + +class TestValidateCaseIdsExist: + """Test the __validate_case_ids_exist helper method""" + + @pytest.mark.api_handler + def test_validate_empty_list(self, environment, api_client): + """Test that empty list returns empty set""" + test_suite = create_test_suite_with_case_ids(num_cases=1) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + result = api_request_handler._ApiRequestHandler__validate_case_ids_exist(suite_id=1, case_ids=[]) + + assert result == set(), "Empty list should return empty set" + + @pytest.mark.api_handler + def test_validate_small_batch_sequential(self, environment, api_client, requests_mock): + """ + Test validation of small batch (<=50 cases) uses sequential validation. 
+ """ + test_suite = create_test_suite_with_case_ids(num_cases=1) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock get_case responses for IDs 1-10 + for i in range(1, 11): + requests_mock.get(create_url(f"get_case/{i}"), json={"id": i, "suite_id": 1, "title": f"Case {i}"}) + + # Add one non-existent case (returns 404) + requests_mock.get(create_url("get_case/999"), status_code=404, json={"error": "Not found"}) + + result = api_request_handler._ApiRequestHandler__validate_case_ids_exist( + suite_id=1, case_ids=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999] + ) + + # Should return 1-10 (11 total requested, 1 doesn't exist) + assert result == set(range(1, 11)), "Should validate existing cases" + assert 999 not in result, "Non-existent case should not be in result" + + @pytest.mark.api_handler + def test_validate_large_batch_concurrent(self, environment, api_client, requests_mock): + """ + Test validation of large batch (>50 cases) uses concurrent validation. + """ + test_suite = create_test_suite_with_case_ids(num_cases=1) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Mock 100 case responses + for i in range(1, 101): + requests_mock.get(create_url(f"get_case/{i}"), json={"id": i, "suite_id": 1, "title": f"Case {i}"}) + + result = api_request_handler._ApiRequestHandler__validate_case_ids_exist( + suite_id=1, case_ids=list(range(1, 101)) + ) + + # Should validate all 100 cases concurrently + assert result == set(range(1, 101)), "Should validate all cases" + assert len(result) == 100 + + @pytest.mark.api_handler + def test_validate_filters_wrong_suite(self, environment, api_client, requests_mock): + """ + Test that validation filters out cases belonging to different suite. + """ + test_suite = create_test_suite_with_case_ids(num_cases=1) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Case 1 belongs to suite 1 (correct) + requests_mock.get(create_url("get_case/1"), json={"id": 1, "suite_id": 1, "title": "Case 1"}) + + # Case 2 belongs to suite 2 (wrong suite) + requests_mock.get(create_url("get_case/2"), json={"id": 2, "suite_id": 2, "title": "Case 2"}) + + # Case 3 belongs to suite 1 (correct) + requests_mock.get(create_url("get_case/3"), json={"id": 3, "suite_id": 1, "title": "Case 3"}) + + result = api_request_handler._ApiRequestHandler__validate_case_ids_exist(suite_id=1, case_ids=[1, 2, 3]) + + # Should only return cases from suite 1 + assert result == {1, 3}, "Should filter out case from wrong suite" + assert 2 not in result, "Case from wrong suite should be excluded" + + @pytest.mark.api_handler + def test_validate_handles_api_errors(self, environment, api_client, requests_mock): + """ + Test that validation gracefully handles API errors (404, 500, etc). 
+ """ + test_suite = create_test_suite_with_case_ids(num_cases=1) + api_request_handler = ApiRequestHandler(environment, api_client, test_suite) + + # Case 1: Success + requests_mock.get(create_url("get_case/1"), json={"id": 1, "suite_id": 1, "title": "Case 1"}) + + # Case 2: 404 Not Found + requests_mock.get(create_url("get_case/2"), status_code=404, json={"error": "Not found"}) + + # Case 3: 500 Server Error + requests_mock.get(create_url("get_case/3"), status_code=500, json={"error": "Internal error"}) + + # Case 4: Success + requests_mock.get(create_url("get_case/4"), json={"id": 4, "suite_id": 1, "title": "Case 4"}) + + result = api_request_handler._ApiRequestHandler__validate_case_ids_exist(suite_id=1, case_ids=[1, 2, 3, 4]) + + # Should return only successful cases + assert result == {1, 4}, "Should only return successfully validated cases" + + +class TestPerformanceComparison: + """Tests demonstrating the performance improvement""" + + @pytest.mark.api_handler + def test_performance_auto_vs_name_matcher(self, environment, api_client, mocker): + """ + Demonstrate that NAME matcher makes fewer API calls than AUTO matcher. + This is a documentation test showing the optimization benefit. + + Scenario: Large report with all case IDs present (best case for NAME matcher) + """ + # Test AUTO matcher (always fetches all cases) + environment.case_matcher = MatchersParser.AUTO + test_suite_auto = create_test_suite_with_case_ids(num_cases=2000) + api_request_handler_auto = ApiRequestHandler(environment, api_client, test_suite_auto) + + mock_get_all_cases_auto = mocker.patch.object( + api_request_handler_auto, + "_ApiRequestHandler__get_all_cases", + return_value=([{"id": i, "custom_automation_id": f"test{i}"} for i in range(1, 2001)], None), + ) + mocker.patch.object(api_request_handler_auto.data_provider, "update_data") + + api_request_handler_auto.check_missing_test_cases_ids(project_id=1) + + # AUTO matcher should call get_all_cases + assert mock_get_all_cases_auto.call_count == 1, "AUTO matcher fetches all cases" + + # Test NAME matcher with all IDs present (best case - skips validation) + env_name = Environment() + env_name.project = "Test Project" + env_name.batch_size = 10 + env_name.case_matcher = MatchersParser.NAME + + test_suite_name = create_test_suite_with_case_ids(num_cases=2000) + api_request_handler_name = ApiRequestHandler(env_name, api_client, test_suite_name) + + mock_get_all_cases_name = mocker.patch.object( + api_request_handler_name, "_ApiRequestHandler__get_all_cases", return_value=([], None) + ) + + mock_validate_name = mocker.patch.object( + api_request_handler_name, "_ApiRequestHandler__validate_case_ids_exist", return_value=set() + ) + + mocker.patch.object(env_name, "log") + + api_request_handler_name.check_missing_test_cases_ids(project_id=1) + + # NAME matcher should NOT call get_all_cases when all IDs present and report >= 1000 + mock_get_all_cases_name.assert_not_called() + # Should also not call individual validation + mock_validate_name.assert_not_called() + + print("\n" + "=" * 60) + print("PERFORMANCE COMPARISON") + print("=" * 60) + print(f"AUTO matcher: {mock_get_all_cases_auto.call_count} get_all_cases calls") + print(f"NAME matcher: {mock_get_all_cases_name.call_count} get_all_cases calls") + print(f"Improvement: {mock_get_all_cases_auto.call_count - mock_get_all_cases_name.call_count} fewer calls") + print("=" * 60) + + @pytest.mark.api_handler + def test_performance_name_matcher_with_missing_ids(self, environment, api_client, mocker): + """ + 
Demonstrate smart strategy selection for NAME matcher with large reports containing missing IDs. + + Scenario: 5000 cases in report, 100 missing IDs + - Individual validation: 5000 API calls + - Fetch all + validate locally: ~660 API calls (for 165k cases in TestRail) + Strategy: Fetch all is more efficient + """ + env = Environment() + env.project = "Test Project" + env.batch_size = 10 + env.case_matcher = MatchersParser.NAME + + # 5000 cases, 100 missing IDs + test_suite = create_test_suite_with_missing_case_ids(total_cases=5000, missing_count=100) + api_request_handler = ApiRequestHandler(env, api_client, test_suite) + + # Mock get_all_cases to simulate fetching 165k cases + mock_get_all_cases = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__get_all_cases", + return_value=([{"id": i} for i in range(101, 5001)], None), # Cases 101-5000 exist + ) + + # Mock individual validation - should NOT be called + mock_validate = mocker.patch.object( + api_request_handler, + "_ApiRequestHandler__validate_case_ids_exist", + return_value=set(range(101, 5001)), + ) + + mocker.patch.object(env, "log") + + api_request_handler.check_missing_test_cases_ids(project_id=1) + + # Should use fetch-all strategy (more efficient for large reports) + mock_get_all_cases.assert_called_once() + mock_validate.assert_not_called() + + print("\n" + "=" * 60) + print("LARGE REPORT WITH MISSING IDS") + print("=" * 60) + print(f"Report size: 5000 cases, 100 missing IDs") + print(f"Strategy chosen: Fetch all cases") + print(f"API calls: 1 fetch (simulates ~660 paginated calls)") + print(f"Alternative: 4900 individual validation calls") + print(f"Efficiency: ~7.4x fewer calls") + print("=" * 60) + + +if __name__ == "__main__": + pytest.main([__file__, "-v", "-s"]) diff --git a/tests/test_project_based_client.py b/tests/test_project_based_client.py index a0efc00..016495a 100644 --- a/tests/test_project_based_client.py +++ b/tests/test_project_based_client.py @@ -26,26 +26,21 @@ def project_based_client_data_provider(self, mocker): environment.file = "results.xml" environment.case_matcher = MatchersParser.AUTO - api_request_handler = mocker.patch( - "trcli.api.project_based_client.ApiRequestHandler" - ) + api_request_handler = mocker.patch("trcli.api.project_based_client.ApiRequestHandler") api_request_handler.get_project_data.return_value = ProjectData( project_id=environment.project_id, suite_mode=1, error_message="" ) api_request_handler.check_automation_id_field.return_value = None project_based_client = ProjectBasedClient( - environment=environment, suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), + environment=environment, + suite=TestRailSuite(name=environment.suite_name, suite_id=environment.suite_id), ) project_based_client.api_request_handler = api_request_handler yield environment, api_request_handler, project_based_client @pytest.mark.project_based_client - @pytest.mark.parametrize( - "timeout", [40, None], ids=["with_timeout", "without_timeout"] - ) - def test_instantiate_api_client( - self, timeout, project_based_client_data_provider, mocker - ): + @pytest.mark.parametrize("timeout", [40, None], ids=["with_timeout", "without_timeout"]) + def test_instantiate_api_client(self, timeout, project_based_client_data_provider, mocker): """The purpose of this test is to check that APIClient was instantiated properly and credential fields were set es expected.""" (_, api_request_handler, _) = project_based_client_data_provider @@ -57,24 +52,22 @@ def 
test_instantiate_api_client( environment.key = "test_api_key" if timeout: environment.timeout = timeout - timeout_expected_result = 30 if not timeout else timeout - project_based_client = ProjectBasedClient( - environment=environment, suite=junit_file_parser - ) + timeout_expected_result = 60 if not timeout else timeout + project_based_client = ProjectBasedClient(environment=environment, suite=junit_file_parser) api_client = project_based_client.instantiate_api_client() assert ( - api_client.username == environment.username + api_client.username == environment.username ), f"Expected username to be set to: {environment.username}, but got: {api_client.username} instead." assert ( - api_client.password == environment.password + api_client.password == environment.password ), f"Expected password to be set to: {environment.password}, but got: {api_client.password} instead." assert ( - api_client.api_key == environment.key + api_client.api_key == environment.key ), f"Expected api_key to be set to: {environment.key}, but got: {api_client.api_key} instead." assert ( - api_client.timeout == timeout_expected_result + api_client.timeout == timeout_expected_result ), f"Expected timeout to be set to: {timeout_expected_result}, but got: {api_client.timeout} instead." def test_resolve_project(self, project_based_client_data_provider): @@ -87,10 +80,10 @@ def test_resolve_project(self, project_based_client_data_provider): ) = project_based_client_data_provider project_based_client.resolve_project() - assert ( - project_based_client.project.project_id == environment.project_id - ), (f"Expected project_based_client.project to have {environment.project_id}," - f" but had {project_based_client.project.project_id}") + assert project_based_client.project.project_id == environment.project_id, ( + f"Expected project_based_client.project to have {environment.project_id}," + f" but had {project_based_client.project.project_id}" + ) @pytest.mark.project_based_client def test_get_suite_id_returns_valid_id(self, project_based_client_data_provider): @@ -109,14 +102,10 @@ def test_get_suite_id_returns_valid_id(self, project_based_client_data_provider) suite_mode=SuiteModes.single_suite ) + assert result_suite_id == suite_id, f"Expected suite_id: {suite_id} but got {result_suite_id} instead." + assert suite_added is False, f"Expected suite_added: {False} but got {suite_added} instead." assert ( - result_suite_id == suite_id - ), f"Expected suite_id: {suite_id} but got {result_suite_id} instead." - assert ( - suite_added is False - ), f"Expected suite_added: {False} but got {suite_added} instead." - assert ( - result_return_code == result_code + result_return_code == result_code ), f"Expected suite_id: {result_code} but got {result_return_code} instead." @pytest.mark.project_based_client @@ -126,14 +115,14 @@ def test_get_suite_id_returns_valid_id(self, project_based_client_data_provider) ids=TEST_GET_SUITE_ID_PROMPTS_USER_IDS, ) def test_get_suite_id_multiple_suites_mode( - self, - user_response, - expected_suite_id, - expected_result_code, - expected_message, - suite_add_error, - project_based_client_data_provider, - mocker, + self, + user_response, + expected_suite_id, + expected_result_code, + expected_message, + suite_add_error, + project_based_client_data_provider, + mocker, ): """The purpose of this test is to check that user will be prompted to add suite is one is missing in TestRail. 
Depending on user response either information about addition of missing suite or error message @@ -160,9 +149,7 @@ def test_get_suite_id_multiple_suites_mode( else: project_based_client.api_request_handler.add_suites.return_value = ( [{"suite_id": expected_suite_id, "name": suite_name}], - FAULT_MAPPING["error_while_adding_suite"].format( - error_message="Failed to add suite." - ), + FAULT_MAPPING["error_while_adding_suite"].format(error_message="Failed to add suite."), ) project_based_client.api_request_handler.suites_data_from_provider.suite_id = None project_based_client.api_request_handler.suites_data_from_provider.name = suite_name @@ -177,18 +164,14 @@ def test_get_suite_id_multiple_suites_mode( if suite_add_error: expected_elog_calls.append( - mocker.call( - FAULT_MAPPING["error_while_adding_suite"].format( - error_message="Failed to add suite." - ) - ) + mocker.call(FAULT_MAPPING["error_while_adding_suite"].format(error_message="Failed to add suite.")) ) assert ( - expected_suite_id == result_suite_id + expected_suite_id == result_suite_id ), f"Expected suite_id: {expected_suite_id} but got {result_suite_id} instead." assert ( - expected_result_code == result_code + expected_result_code == result_code ), f"Expected suite_id: {expected_result_code} but got {result_code} instead." environment.get_prompt_response_for_auto_creation.assert_called_with( PROMPT_MESSAGES["create_new_suite"].format( @@ -197,9 +180,7 @@ def test_get_suite_id_multiple_suites_mode( ) ) if user_response: - project_based_client.api_request_handler.add_suites.assert_called_with( - project_id=project_id - ) + project_based_client.api_request_handler.add_suites.assert_called_with(project_id=project_id) environment.log.assert_has_calls(expected_log_calls) environment.elog.assert_has_calls(expected_elog_calls) @@ -210,13 +191,13 @@ def test_get_suite_id_multiple_suites_mode( ids=["get_suite_ids succeeds", "get_suite_ids fails"], ) def test_get_suite_id_single_suite_mode( - self, - suite_ids, - error_message, - expected_suite_id, - expected_result_code, - project_based_client_data_provider, - mocker, + self, + suite_ids, + error_message, + expected_suite_id, + expected_result_code, + project_based_client_data_provider, + mocker, ): """The purpose of this test is to check flow of get_suite_id_log_error function for single suite mode.""" @@ -238,10 +219,10 @@ def test_get_suite_id_single_suite_mode( result_suite_id, result_code, suite_added = project_based_client.get_suite_id(suite_mode) assert ( - result_suite_id == expected_suite_id + result_suite_id == expected_suite_id ), f"Expected suite id: {expected_suite_id} but got {result_suite_id} instead." assert ( - result_code == expected_result_code + result_code == expected_result_code ), f"Expected result code: {expected_result_code} but got {result_code} instead." 
if error_message: environment.elog.assert_has_calls(expected_elog_calls) @@ -253,13 +234,13 @@ def test_get_suite_id_single_suite_mode( ids=TEST_GET_SUITE_ID_SINGLE_SUITE_MODE_BASELINES_IDS, ) def test_get_suite_id_single_suite_mode_baselines( - self, - get_suite_ids_result, - expected_suite_id, - expected_result_code, - expected_error_message, - project_based_client_data_provider, - mocker, + self, + get_suite_ids_result, + expected_suite_id, + expected_result_code, + expected_error_message, + project_based_client_data_provider, + mocker, ): """The purpose of this test is to check flow of get_suite_id_log_error function for single suite with baselines mode.""" @@ -271,26 +252,22 @@ def test_get_suite_id_single_suite_mode_baselines( suite_mode = SuiteModes.single_suite_baselines project_based_client.api_request_handler.resolve_suite_id_using_name.return_value = (-1, "Any Error") project_based_client.api_request_handler.suites_data_from_provider.suite_id = None - project_based_client.api_request_handler.get_suite_ids.return_value = ( - get_suite_ids_result - ) + project_based_client.api_request_handler.get_suite_ids.return_value = get_suite_ids_result expected_elog_calls = [] if expected_error_message: expected_elog_calls = [mocker.call(expected_error_message)] result_suite_id, result_code, suite_added = project_based_client.get_suite_id(suite_mode) assert ( - result_suite_id == expected_suite_id + result_suite_id == expected_suite_id ), f"Expected suite id: {expected_suite_id} but got {result_suite_id} instead." assert ( - result_code == expected_result_code + result_code == expected_result_code ), f"Expected result code: {expected_result_code} but got {result_code} instead." environment.elog.assert_has_calls(expected_elog_calls) @pytest.mark.project_based_client - def test_get_suite_id_unknown_suite_mode( - self, project_based_client_data_provider, mocker - ): + def test_get_suite_id_unknown_suite_mode(self, project_based_client_data_provider, mocker): """The purpose of this test is to check that get_suite_id will return -1 and print proper message when unknown suite mode will be returned during execution.""" ( @@ -302,18 +279,14 @@ def test_get_suite_id_unknown_suite_mode( expected_result_code = -1 expected_suite_id = -1 project_based_client.api_request_handler.suites_data_from_provider.suite_id = None - expected_elog_calls = [ - mocker.call( - FAULT_MAPPING["unknown_suite_mode"].format(suite_mode=suite_mode) - ) - ] + expected_elog_calls = [mocker.call(FAULT_MAPPING["unknown_suite_mode"].format(suite_mode=suite_mode))] result_suite_id, result_code, suite_added = project_based_client.get_suite_id(suite_mode) assert ( - result_suite_id == expected_suite_id + result_suite_id == expected_suite_id ), f"Expected suite id: {expected_suite_id} but got {result_suite_id} instead." assert ( - result_code == expected_result_code + result_code == expected_result_code ), f"Expected result code: {expected_result_code} but got {result_code} instead." environment.elog.assert_has_calls(expected_elog_calls) @@ -333,13 +306,11 @@ def test_check_suite_id_returns_id(self, project_based_client_data_provider): result_code = project_based_client.check_suite_id(project_id=project_id) assert ( - result_code == expected_result_code + result_code == expected_result_code ), f"Expected to get {result_code} as result code, but got {expected_result_code} instead." 
@pytest.mark.project_based_client - def test_check_suite_id_prints_error_message( - self, project_based_client_data_provider, mocker - ): + def test_check_suite_id_prints_error_message(self, project_based_client_data_provider, mocker): """The purpose of this test is to check that proper message would be printed to the user and program will quit when suite ID is not present in TestRail.""" ( @@ -356,13 +327,11 @@ def test_check_suite_id_prints_error_message( ) result_code = project_based_client.check_suite_id(project_id=project_id) - expected_elog_calls = [ - mocker.call(FAULT_MAPPING["missing_suite"].format(suite_id=suite_id)) - ] + expected_elog_calls = [mocker.call(FAULT_MAPPING["missing_suite"].format(suite_id=suite_id))] environment.elog.assert_has_calls(expected_elog_calls) assert ( - result_code == expected_result_code + result_code == expected_result_code ), f"Expected to get {expected_result_code} as result code, but got {result_code} instead." def test_resolve_suite_returns_valid_id(self, project_based_client_data_provider): @@ -377,9 +346,7 @@ def test_resolve_suite_returns_valid_id(self, project_based_client_data_provider project_based_client.resolve_project() suite_id, suite_added = project_based_client.resolve_suite() - assert ( - suite_id == 1 - ), f"Expected suite id 1 but got {suite_id} instead." + assert suite_id == 1, f"Expected suite id 1 but got {suite_id} instead." def test_create_or_update_test_run_calls_add_run(self, project_based_client_data_provider): """The purpose of this test is to check that calling the method without a run_id in the environment causes @@ -396,12 +363,8 @@ def test_create_or_update_test_run_calls_add_run(self, project_based_client_data run_id, error_message = project_based_client.create_or_update_test_run() project_based_client.api_request_handler.add_run.assert_called_once() - assert ( - run_id == 1 - ), f"Expected run_id to be 1 but got {run_id} instead." - assert ( - error_message == "" - ), f"Expected error message to be None but got {error_message} instead." + assert run_id == 1, f"Expected run_id to be 1 but got {run_id} instead." + assert error_message == "", f"Expected error message to be None but got {error_message} instead." def test_create_or_update_test_run_calls_update_run(self, project_based_client_data_provider): """The purpose of this test is to check that calling the method with a run_id in the environment causes @@ -418,12 +381,8 @@ def test_create_or_update_test_run_calls_update_run(self, project_based_client_d run_id, error_message = project_based_client.create_or_update_test_run() api_request_handler.update_run.assert_called_once() - assert ( - run_id == 1 - ), f"Expected run_id to be 1 but got {run_id} instead." - assert ( - error_message == "" - ), f"Expected error message to be None but got {error_message} instead." + assert run_id == 1, f"Expected run_id to be 1 but got {run_id} instead." + assert error_message == "", f"Expected error message to be None but got {error_message} instead." 
def test_get_project_id(self, project_based_client_data_provider): """The purpose of this test is to check that the _get_project_id() will fall back to the environment.project_id @@ -434,7 +393,7 @@ def test_get_project_id(self, project_based_client_data_provider): project_based_client, ) = project_based_client_data_provider - assert ( - project_based_client._get_project_id() == environment.project_id - ), (f"Expected to get {environment.project_id} from project_based_client.get_project_id but got" - f" {project_based_client._get_project_id()} instead.") + assert project_based_client._get_project_id() == environment.project_id, ( + f"Expected to get {environment.project_id} from project_based_client.get_project_id but got" + f" {project_based_client._get_project_id()} instead." + ) diff --git a/trcli/api/api_client.py b/trcli/api/api_client.py index a412754..1742b38 100644 --- a/trcli/api/api_client.py +++ b/trcli/api/api_client.py @@ -39,7 +39,7 @@ class APIClient: PREFIX = "index.php?" VERSION = "/api/v2/" SUFFIX_API_V2_VERSION = f"{PREFIX}{VERSION}" - RETRY_ON = [429, 500, 502] + RETRY_ON = [429, 500, 502, 503, 504] # Added 503 Service Unavailable and 504 Gateway Timeout USER_AGENT = "TRCLI" def __init__( @@ -176,6 +176,12 @@ def __send_request( if status_code == 429: retry_time = float(response.headers["Retry-After"]) sleep(retry_time) + elif status_code in [500, 502, 503, 504] and i < self.retries: + backoff_time = min(2**i, 30) # Exponential backoff capped at 30 seconds + self.logging_function( + f"Server error {status_code}, retrying in {backoff_time}s (attempt {i+1}/{self.retries})..." + ) + sleep(backoff_time) try: # workaround for buggy legacy TR server version response if response.content.startswith(b"USER AUTHENTICATION SUCCESSFUL!\n"): diff --git a/trcli/api/api_request_handler.py b/trcli/api/api_request_handler.py index 7493a8e..4d4b190 100644 --- a/trcli/api/api_request_handler.py +++ b/trcli/api/api_request_handler.py @@ -1,4 +1,5 @@ import html, json, os +import time from concurrent.futures import ThreadPoolExecutor, as_completed from beartype.typing import List, Union, Tuple, Dict @@ -14,7 +15,12 @@ from trcli.data_classes.data_parsers import MatchersParser from trcli.data_classes.dataclass_testrail import TestRailSuite, TestRailCase, ProjectData from trcli.data_providers.api_data_provider import ApiDataProvider -from trcli.settings import MAX_WORKERS_ADD_RESULTS, MAX_WORKERS_ADD_CASE +from trcli.settings import ( + MAX_WORKERS_ADD_RESULTS, + MAX_WORKERS_ADD_CASE, + ENABLE_PARALLEL_PAGINATION, + MAX_WORKERS_PARALLEL_PAGINATION, +) class ApiRequestHandler: @@ -307,9 +313,14 @@ def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: """ missing_cases_number = 0 suite_id = self.suites_data_from_provider.suite_id - returned_cases, error_message = self.__get_all_cases(project_id, suite_id) - if error_message: - return False, error_message + + # Performance optimization: Only fetch all cases if using AUTO matcher + # NAME/PROPERTY matchers can validate case IDs individually + if self.environment.case_matcher == MatchersParser.AUTO: + returned_cases, error_message = self.__get_all_cases(project_id, suite_id) + if error_message: + return False, error_message + if self.environment.case_matcher == MatchersParser.AUTO: test_cases_by_aut_id = {} for case in returned_cases: @@ -337,19 +348,72 @@ def check_missing_test_cases_ids(self, project_id: int) -> Tuple[bool, str]: if missing_cases_number: self.environment.log(f"Found {missing_cases_number} test cases not 
matching any TestRail case.") else: + # For NAME or PROPERTY matcher we validate case IDs nonexistent_ids = [] - all_case_ids = [case["id"] for case in returned_cases] + case_ids_to_validate = set() + + # Collect all unique case IDs that need validation for section in self.suites_data_from_provider.testsections: for test_case in section.testcases: if not test_case.case_id: missing_cases_number += 1 - elif int(test_case.case_id) not in all_case_ids: - nonexistent_ids.append(test_case.case_id) + else: + case_ids_to_validate.add(int(test_case.case_id)) + + total_tests_in_report = missing_cases_number + len(case_ids_to_validate) + if missing_cases_number: self.environment.log(f"Found {missing_cases_number} test cases without case ID in the report file.") - if nonexistent_ids: - self.environment.elog(f"Nonexistent case IDs found in the report file: {nonexistent_ids}") - return False, "Case IDs not in TestRail project or suite were detected in the report file." + + # Smart validation strategy based on report size + # Threshold: 1000 cases (same as skip validation threshold for consistency) + if case_ids_to_validate: + # Skip validation for large reports with all IDs (most efficient) + if missing_cases_number == 0 and total_tests_in_report >= 1000: + # All tests have IDs and report is large: Skip validation (trust IDs) + self.environment.log( + f"Skipping validation of {len(case_ids_to_validate)} case IDs " + f"(all tests have IDs, trusting they exist). " + f"If you encounter errors, ensure all case IDs in your test report exist in TestRail." + ) + nonexistent_ids = [] + + # Fetch all for large reports with missing IDs + elif total_tests_in_report >= 1000: + # Large report (>=1000 cases) with some missing IDs: Fetch all cases and validate locally + # This is more efficient than individual validation for large batches + self.environment.log( + f"Large report detected ({total_tests_in_report} cases). " + f"Fetching all cases from TestRail for efficient validation..." + ) + returned_cases, error_message = self.__get_all_cases(project_id, suite_id) + if error_message: + return False, error_message + + # Build lookup dictionary from fetched cases + all_case_ids = {case["id"] for case in returned_cases} + + # Validate locally (O(1) lookup) + nonexistent_ids = [cid for cid in case_ids_to_validate if cid not in all_case_ids] + + if nonexistent_ids: + self.environment.elog( + f"Nonexistent case IDs found in the report file: {nonexistent_ids[:20]}" + f"{' ...' if len(nonexistent_ids) > 20 else ''}" + ) + return False, "Case IDs not in TestRail project or suite were detected in the report file." + + # Individual validation for small reports + else: + # Small report (<1000 cases): Use individual validation + # This is more efficient for small batches + self.environment.log(f"Validating {len(case_ids_to_validate)} case IDs exist in TestRail...") + validated_ids = self.__validate_case_ids_exist(suite_id, list(case_ids_to_validate)) + nonexistent_ids = [cid for cid in case_ids_to_validate if cid not in validated_ids] + + if nonexistent_ids: + self.environment.elog(f"Nonexistent case IDs found in the report file: {nonexistent_ids}") + return False, "Case IDs not in TestRail project or suite were detected in the report file." return missing_cases_number > 0, "" @@ -1010,7 +1074,18 @@ def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[ Get all entities from all pages if number of entities is too big to return in single response. Function using next page field in API response. 
Entity examples: cases, sections + + If ENABLE_PARALLEL_PAGINATION is True or --parallel-pagination flag is set, + will use parallel fetching for better performance. """ + # Check if parallel pagination is enabled (CLI flag takes precedence) + parallel_enabled = getattr(self.environment, "parallel_pagination", False) or ENABLE_PARALLEL_PAGINATION + + # Use parallel pagination if enabled and this is the first call (entities is empty) + if parallel_enabled and not entities: + return self.__get_all_entities_parallel(entity, link) + + # Otherwise use sequential pagination (original implementation) if link.startswith(self.suffix): link = link.replace(self.suffix, "") response = self.client.send_get(link) @@ -1032,6 +1107,281 @@ def __get_all_entities(self, entity: str, link=None, entities=[]) -> Tuple[List[ else: return [], response.error_message + def __get_all_entities_parallel(self, entity: str, link: str) -> Tuple[List[Dict], str]: + """ + Parallel version of __get_all_entities for faster pagination. + Fetches multiple pages concurrently using ThreadPoolExecutor. + + :param entity: Entity type (cases, sections, etc.) + :param link: Initial API link + :returns: Tuple of (all entities list, error message) + """ + fetch_start_time = time.time() + + if link.startswith(self.suffix): + link = link.replace(self.suffix, "") + + # Step 1: Fetch first page to get metadata + self.environment.log(f"Fetching first page to determine total pages...") + response = self.client.send_get(link) + + if response.error_message: + return [], response.error_message + + # Handle non-paginated responses (legacy endpoints) + if isinstance(response.response_text, list): + return response.response_text, response.error_message + + if isinstance(response.response_text, str): + error_msg = FAULT_MAPPING["invalid_api_response"].format(error_details=response.response_text[:200]) + return [], error_msg + + # Collect first page results + all_entities = response.response_text[entity] + first_page_count = len(all_entities) + + # Check if there are more pages + if response.response_text["_links"]["next"] is None: + # Only one page, return immediately + fetch_time = time.time() - fetch_start_time + self.environment.log(f"Single page fetch completed in {fetch_time:.1f}s") + return all_entities, response.error_message + + # Step 2: Calculate total pages needed + # TestRail pagination uses limit parameter (default 250) + # We need to parse the next link to understand pagination structure + next_link = response.response_text["_links"]["next"] + + # Extract offset/limit from the link to calculate total pages + import re + from urllib.parse import urlparse, parse_qs + + # Parse the next link to get offset and limit + parsed = urlparse(next_link) + query_params = parse_qs(parsed.query) + + # Get limit (page size) - default to 250 if not found + limit = int(query_params.get("limit", [250])[0]) + if limit == 0: + limit = 250 + + # Get offset from next link + next_offset = int(query_params.get("offset", [limit])[0]) + + # Step 3: Fetch pages in parallel with dynamic offset generation + # Build base link without offset parameter + # TestRail API uses '&' as separator (e.g., get_cases/123&suite_id=2&offset=250) + base_link = link.split("&offset=")[0].split("?offset=")[0] + + self.environment.log( + f"Starting parallel fetch: first page has {first_page_count} {entity}, " + f"fetching remaining pages with {MAX_WORKERS_PARALLEL_PAGINATION} workers..." 
+ ) + + def fetch_page(offset): + """Fetch a single page by offset""" + # TestRail always uses '&' as separator, not '?' + page_link = f"{base_link}&offset={offset}&limit={limit}" + page_response = self.client.send_get(page_link) + + if page_response.error_message: + return None, page_response.error_message + + if isinstance(page_response.response_text, dict) and entity in page_response.response_text: + page_data = page_response.response_text[entity] + # Return empty list if this page has no data (we've reached the end) + if not page_data: + return [], None + return page_data, None + else: + return None, "Invalid response format" + + # Fetch pages in parallel with intelligent batching to avoid overwhelming server + error_message = "" + pages_fetched = 1 # We already have the first page + + # Use batching: submit batches of pages, check results, submit next batch + # This prevents overwhelming the server with 10k requests at once + batch_size = 100 # Submit 100 pages at a time + current_page_index = 0 + max_pages = 10000 # Safety cap + consecutive_empty_pages = 0 + max_consecutive_empty = 10 # Stop after 10 consecutive empty pages + + with ThreadPoolExecutor(max_workers=MAX_WORKERS_PARALLEL_PAGINATION) as executor: + should_continue = True + + while should_continue and current_page_index < max_pages: + # Submit next batch of pages + futures = {} + batch_offsets = [] + + for i in range(batch_size): + if current_page_index + i >= max_pages: + break + offset = next_offset + ((current_page_index + i) * limit) + batch_offsets.append(offset) + future = executor.submit(fetch_page, offset) + futures[future] = offset + + if not futures: + break + + # Process this batch + batch_had_data = False + for future in as_completed(futures): + offset = futures[future] + try: + page_data, page_error = future.result() + + if page_error: + error_message = page_error + self.environment.elog(f"Error fetching page at offset {offset}: {page_error}") + should_continue = False + # Cancel remaining futures in this batch + for f in futures: + if not f.done(): + f.cancel() + break + + if page_data is None: + # Error occurred + error_message = "Invalid response format" + should_continue = False + # Cancel remaining + for f in futures: + if not f.done(): + f.cancel() + break + + if len(page_data) == 0: + # Empty page + consecutive_empty_pages += 1 + if consecutive_empty_pages >= max_consecutive_empty: + # We've hit enough empty pages, stop fetching + self.environment.log(f"Reached end of data after {consecutive_empty_pages} empty pages") + should_continue = False + # Cancel remaining futures in this batch + for f in futures: + if not f.done(): + f.cancel() + break + else: + # Got data - reset consecutive empty counter + consecutive_empty_pages = 0 + batch_had_data = True + + # Add results to our collection + all_entities.extend(page_data) + pages_fetched += 1 + + # Log progress every 50 pages + if pages_fetched % 50 == 0: + self.environment.log( + f"Fetched {pages_fetched} pages, {len(all_entities)} {entity} so far..." 
+ ) + + except Exception as ex: + error_message = f"Exception during parallel fetch: {str(ex)}" + self.environment.elog(error_message) + should_continue = False + # Cancel remaining + for f in futures: + if not f.done(): + f.cancel() + break + + # Move to next batch + current_page_index += batch_size + + # If this batch had no data at all, we've likely reached the end + if not batch_had_data and consecutive_empty_pages > 0: + should_continue = False + + fetch_time = time.time() - fetch_start_time + + if error_message: + self.environment.elog(f"Parallel fetch failed after {fetch_time:.1f}s, falling back to sequential...") + # Fall back to sequential fetch + return self.__get_all_entities_sequential(entity, link, []) + + self.environment.log( + f"Parallel fetch completed: {len(all_entities)} {entity} in {fetch_time:.1f}s " + f"(~{len(all_entities) / fetch_time:.0f} items/sec)" + ) + + return all_entities, "" + + def __get_all_entities_sequential(self, entity: str, link: str, entities: List[Dict]) -> Tuple[List[Dict], str]: + """ + Sequential fallback for __get_all_entities (original implementation). + This is kept separate for fallback purposes. + """ + if link.startswith(self.suffix): + link = link.replace(self.suffix, "") + response = self.client.send_get(link) + if not response.error_message: + if isinstance(response.response_text, list): + return response.response_text, response.error_message + if isinstance(response.response_text, str): + error_msg = FAULT_MAPPING["invalid_api_response"].format(error_details=response.response_text[:200]) + return [], error_msg + entities = entities + response.response_text[entity] + if response.response_text["_links"]["next"] is not None: + next_link = response.response_text["_links"]["next"].replace("limit=0", "limit=250") + return self.__get_all_entities_sequential(entity, link=next_link, entities=entities) + else: + return entities, response.error_message + else: + return [], response.error_message + + def __validate_case_ids_exist(self, suite_id: int, case_ids: List[int]) -> set: + """ + Validate that case IDs exist in TestRail without fetching all cases. + Returns set of valid case IDs. 
+ + :param suite_id: Suite ID + :param case_ids: List of case IDs to validate + :returns: Set of case IDs that exist in TestRail + """ + if not case_ids: + return set() + + valid_ids = set() + + # For large numbers of case IDs, use concurrent validation + if len(case_ids) > 50: + from concurrent.futures import ThreadPoolExecutor, as_completed + + def check_case_exists(case_id): + """Check if a single case exists""" + response = self.client.send_get(f"get_case/{case_id}") + if response.status_code == 200 and not response.error_message: + # Verify case belongs to correct project/suite + case_data = response.response_text + if case_data.get("suite_id") == suite_id: + return case_id + return None + + # Use 10 concurrent workers to validate IDs + with ThreadPoolExecutor(max_workers=10) as executor: + futures = {executor.submit(check_case_exists, cid): cid for cid in case_ids} + + for future in as_completed(futures): + result = future.result() + if result is not None: + valid_ids.add(result) + else: + # For small sets, validate sequentially + for case_id in case_ids: + response = self.client.send_get(f"get_case/{case_id}") + if response.status_code == 200 and not response.error_message: + case_data = response.response_text + if case_data.get("suite_id") == suite_id: + valid_ids.add(case_id) + + return valid_ids + # Label management methods def add_label(self, project_id: int, title: str) -> Tuple[dict, str]: """ diff --git a/trcli/cli.py b/trcli/cli.py index 155e1e6..6fa281b 100755 --- a/trcli/cli.py +++ b/trcli/cli.py @@ -77,6 +77,7 @@ def __init__(self, cmd="parse_junit"): self.assign_failed_to = None # Add proxy related attributes self.noproxy = None self.proxy_user = None + self.parallel_pagination = None @property def case_fields(self): @@ -90,7 +91,7 @@ def case_fields(self, case_fields: Union[List[str], dict]): exit(1) self._case_fields = fields_dict - @property + @property def result_fields(self): return self._result_fields @@ -202,18 +203,11 @@ def parse_params_from_config_file(self, file_path: Path): for page_content in file_content: if page_content: self.params_from_config.update(page_content) - if ( - self.params_from_config.get("config") is not None - and self.default_config_file - ): + if self.params_from_config.get("config") is not None and self.default_config_file: self.default_config_file = False - self.parse_params_from_config_file( - self.params_from_config["config"] - ) + self.parse_params_from_config_file(self.params_from_config["config"]) except (yaml.YAMLError, ValueError, TypeError) as e: - self.elog( - FAULT_MAPPING["yaml_file_parse_issue"].format(file_path=file_path) - ) + self.elog(FAULT_MAPPING["yaml_file_parse_issue"].format(file_path=file_path)) self.elog(f"Error details:\n{e}") if not self.default_config_file: exit(1) @@ -280,10 +274,13 @@ def main(self, *args, **kwargs): ) @click.option("-u", "--username", type=click.STRING, metavar="", help="Username.") @click.option("-p", "--password", type=click.STRING, metavar="", help="Password.") -@click.option("-k", "--key", metavar="", help="API key used for authenticating with TestRail. This must be used in conjunction with --username. If provided, --password is not required.") @click.option( - "-v", "--verbose", is_flag=True, help="Output all API calls and their results." + "-k", + "--key", + metavar="", + help="API key used for authenticating with TestRail. This must be used in conjunction with --username. 
If provided, --password is not required.", ) +@click.option("-v", "--verbose", is_flag=True, help="Output all API calls and their results.") @click.option("--verify", is_flag=True, help="Verify the data was added correctly.") @click.option("--insecure", is_flag=True, help="Allow insecure requests.") @click.option( @@ -328,22 +325,14 @@ def main(self, *args, **kwargs): help="Silence stdout", default=False, ) +@click.option("--proxy", metavar="", help="Proxy address and port (e.g., http://proxy.example.com:8080).") +@click.option("--proxy-user", metavar="", help="Proxy username and password in the format 'username:password'.") @click.option( - "--proxy", - metavar="", - help="Proxy address and port (e.g., http://proxy.example.com:8080)." + "--noproxy", metavar="", help="Comma-separated list of hostnames to bypass the proxy (e.g., localhost,127.0.0.1)." ) @click.option( - "--proxy-user", - metavar="", - help="Proxy username and password in the format 'username:password'." + "--parallel-pagination", is_flag=True, help="Enable parallel pagination for faster case fetching (experimental)." ) -@click.option( - "--noproxy", - metavar="", - help="Comma-separated list of hostnames to bypass the proxy (e.g., localhost,127.0.0.1)." -) - def cli(environment: Environment, context: click.core.Context, *args, **kwargs): """TestRail CLI""" if not sys.argv[1:]: @@ -354,6 +343,6 @@ def cli(environment: Environment, context: click.core.Context, *args, **kwargs): if not context.invoked_subcommand: print(MISSING_COMMAND_SLOGAN) exit(2) - + environment.parse_config_file(context) environment.set_parameters(context) diff --git a/trcli/settings.py b/trcli/settings.py index 7cd59dc..829af7d 100644 --- a/trcli/settings.py +++ b/trcli/settings.py @@ -1,6 +1,8 @@ MAX_WORKERS_ADD_CASE = 10 -MAX_WORKERS_ADD_RESULTS = 10 -DEFAULT_API_CALL_RETRIES = 3 -DEFAULT_API_CALL_TIMEOUT = 30 +MAX_WORKERS_ADD_RESULTS = 20 +DEFAULT_API_CALL_RETRIES = 5 +DEFAULT_API_CALL_TIMEOUT = 60 DEFAULT_BATCH_SIZE = 50 ALLOW_ELAPSED_MS = False +ENABLE_PARALLEL_PAGINATION = False +MAX_WORKERS_PARALLEL_PAGINATION = 10
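
# ---------------------------------------------------------------------------
# Reviewer note (illustrative only, not part of the patch): the two sketches
# below restate, as standalone runnable Python, behaviour introduced by this
# diff. Names such as choose_validation_strategy and backoff_schedule are
# hypothetical and exist purely for illustration.
# ---------------------------------------------------------------------------

# 1) Validation-strategy selection used by the NAME/PROPERTY-matcher branch of
#    check_missing_test_cases_ids(), assuming the 1000-case threshold from the
#    diff and that at least one case ID is present in the report.
LARGE_REPORT_THRESHOLD = 1000


def choose_validation_strategy(total_tests_in_report: int, missing_cases_number: int) -> str:
    if missing_cases_number == 0 and total_tests_in_report >= LARGE_REPORT_THRESHOLD:
        # Every test already carries a case ID and the report is large: trust the IDs.
        return "skip_validation"
    if total_tests_in_report >= LARGE_REPORT_THRESHOLD:
        # Large report with gaps: one paginated get_cases fetch, then validate locally.
        return "fetch_all_and_validate_locally"
    # Small report: validate each ID individually via get_case/{id}.
    return "validate_individually"


# Scenarios mirroring the tests added above:
assert choose_validation_strategy(2000, 0) == "skip_validation"
assert choose_validation_strategy(1500, 3) == "fetch_all_and_validate_locally"
assert choose_validation_strategy(500, 10) == "validate_individually"


# 2) Retry backoff added to APIClient.__send_request for 500/502/503/504
#    responses: exponential, capped at 30 seconds. The schedule below assumes
#    the retry counter i starts at 0, as the expression min(2**i, 30) suggests.
def backoff_schedule(attempts: int) -> list:
    return [min(2 ** i, 30) for i in range(attempts)]


assert backoff_schedule(5) == [1, 2, 4, 8, 16]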