From 35f34595b17ee98bb4a3642dba14e338821774b8 Mon Sep 17 00:00:00 2001
From: James Baskerville
Date: Fri, 9 May 2025 16:22:24 +0100
Subject: [PATCH 1/2] Add tests for prompts call and call_stream

---
 tests/integration/conftest.py     | 16 +++++++++++
 tests/integration/test_prompts.py | 45 +++++++++++++++++++++++++++++++
 2 files changed, 61 insertions(+)
 create mode 100644 tests/integration/test_prompts.py

diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index 72611b5d..955c035c 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -119,6 +119,22 @@ def eval_prompt(
         pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
 
 
+@pytest.fixture(scope="function")
+def prompt(
+    humanloop_test_client: Humanloop, sdk_test_dir: str, openai_key: str, test_prompt_config: dict[str, Any]
+) -> Generator[TestIdentifiers, None, None]:
+    prompt_path = f"{sdk_test_dir}/prompt"
+    try:
+        response = humanloop_test_client.prompts.upsert(
+            path=prompt_path,
+            **test_prompt_config,
+        )
+        yield TestIdentifiers(file_id=response.id, file_path=response.path)
+        humanloop_test_client.prompts.delete(id=response.id)
+    except Exception as e:
+        pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
+
+
 @pytest.fixture(scope="function")
 def output_not_null_evaluator(
     humanloop_test_client: Humanloop, sdk_test_dir: str
diff --git a/tests/integration/test_prompts.py b/tests/integration/test_prompts.py
new file mode 100644
index 00000000..8926a06c
--- /dev/null
+++ b/tests/integration/test_prompts.py
@@ -0,0 +1,45 @@
+from humanloop.client import Humanloop
+from tests.integration.conftest import TestIdentifiers
+
+
+def test_prompts_call(
+    humanloop_test_client: Humanloop,
+    prompt: TestIdentifiers,
+    test_prompt_config: TestIdentifiers,
+) -> None:
+    response = humanloop_test_client.prompts.call(
+        path=prompt.file_path,
+        prompt={**test_prompt_config},
+        inputs={"question": "What is the capital of the France?"},
+    )
+    assert response is not None
+    assert response.log_id is not None
+    assert response.logs is not None
+    for log in response.logs:
+        assert log is not None
+        assert log.output or log.error or log.output_message is not None
+        assert "Paris" in log.output
+    assert response.prompt.path == prompt.file_path
+
+
+def test_prompts_call_stream(
+    humanloop_test_client: Humanloop,
+    prompt: TestIdentifiers,
+    test_prompt_config: TestIdentifiers,
+) -> None:
+    response = humanloop_test_client.prompts.call_stream(
+        path=prompt.file_path,
+        prompt={**test_prompt_config},
+        inputs={"question": "What is the capital of the France?"},
+    )
+
+    output = ""
+    for chunk in response:
+        assert chunk is not None
+        assert chunk.output or chunk.error or chunk.output_message is not None
+        assert chunk.id is not None
+        assert chunk.prompt_id is not None
+        assert chunk.version_id is not None
+        output += chunk.output
+
+    assert "Paris" in output

From 92d2cd5d07bded3e301955aaa509f0709b9c553f Mon Sep 17 00:00:00 2001
From: James Baskerville
Date: Fri, 9 May 2025 17:00:54 +0100
Subject: [PATCH 2/2] fix: Type checking fixes

---
 tests/integration/conftest.py     | 15 ++++++++-------
 tests/integration/test_prompts.py | 13 +++++++------
 2 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index 955c035c..d14042a3 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -1,13 +1,14 @@
-from contextlib import contextmanager, redirect_stdout
-from dataclasses import dataclass
-import os
-from typing import Any, ContextManager, Generator
 import io
-from typing import TextIO
+import os
 import uuid
-import pytest
+from contextlib import contextmanager, redirect_stdout
+from dataclasses import dataclass
+from typing import Any, ContextManager, Generator, TextIO
+
 import dotenv
+import pytest
 from humanloop.client import Humanloop
+from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
 
 
 @dataclass
@@ -55,7 +56,7 @@ def sdk_test_dir(humanloop_test_client: Humanloop) -> Generator[str, None, None
 
 
 @pytest.fixture(scope="function")
-def test_prompt_config() -> dict[str, Any]:
+def test_prompt_config() -> PromptKernelRequestParams:
     return {
         "provider": "openai",
         "model": "gpt-4o-mini",
diff --git a/tests/integration/test_prompts.py b/tests/integration/test_prompts.py
index 8926a06c..13ca80eb 100644
--- a/tests/integration/test_prompts.py
+++ b/tests/integration/test_prompts.py
@@ -1,4 +1,5 @@
 from humanloop.client import Humanloop
+
 from tests.integration.conftest import TestIdentifiers
 
 
@@ -7,9 +8,9 @@ def test_prompts_call(
     prompt: TestIdentifiers,
     test_prompt_config: TestIdentifiers,
 ) -> None:
-    response = humanloop_test_client.prompts.call(
+    response = humanloop_test_client.prompts.call(  # type: ignore [attr-defined]
         path=prompt.file_path,
-        prompt={**test_prompt_config},
+        prompt={**test_prompt_config},  # type: ignore [misc, arg-type, typeddict-item, dict-item, list-item]
         inputs={"question": "What is the capital of the France?"},
     )
     assert response is not None
@@ -17,7 +18,7 @@
     assert response.logs is not None
     for log in response.logs:
         assert log is not None
-        assert log.output or log.error or log.output_message is not None
+        assert log.output is not None
         assert "Paris" in log.output
     assert response.prompt.path == prompt.file_path
 
@@ -27,16 +28,16 @@ def test_prompts_call_stream(
     prompt: TestIdentifiers,
     test_prompt_config: TestIdentifiers,
 ) -> None:
-    response = humanloop_test_client.prompts.call_stream(
+    response = humanloop_test_client.prompts.call_stream(  # type: ignore [attr-defined]
        path=prompt.file_path,
-        prompt={**test_prompt_config},
+        prompt={**test_prompt_config},  # type: ignore [misc, arg-type, typeddict-item, dict-item, list-item]
         inputs={"question": "What is the capital of the France?"},
     )
 
     output = ""
     for chunk in response:
         assert chunk is not None
-        assert chunk.output or chunk.error or chunk.output_message is not None
+        assert chunk.output is not None
         assert chunk.id is not None
         assert chunk.prompt_id is not None
         assert chunk.version_id is not None