diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
index 72611b5..d14042a 100644
--- a/tests/integration/conftest.py
+++ b/tests/integration/conftest.py
@@ -1,13 +1,14 @@
-from contextlib import contextmanager, redirect_stdout
-from dataclasses import dataclass
-import os
-from typing import Any, ContextManager, Generator
 import io
-from typing import TextIO
+import os
 import uuid
-import pytest
+from contextlib import contextmanager, redirect_stdout
+from dataclasses import dataclass
+from typing import Any, ContextManager, Generator, TextIO
+
 import dotenv
+import pytest
 from humanloop.client import Humanloop
+from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
 
 
 @dataclass
@@ -55,7 +56,7 @@ def sdk_test_dir(humanloop_test_client: Humanloop) -> Generator[str, None, None]
 
 
 @pytest.fixture(scope="function")
-def test_prompt_config() -> dict[str, Any]:
+def test_prompt_config() -> PromptKernelRequestParams:
     return {
         "provider": "openai",
         "model": "gpt-4o-mini",
@@ -119,6 +120,22 @@ def eval_prompt(
         pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
 
 
+@pytest.fixture(scope="function")
+def prompt(
+    humanloop_test_client: Humanloop, sdk_test_dir: str, openai_key: str, test_prompt_config: PromptKernelRequestParams
+) -> Generator[TestIdentifiers, None, None]:
+    prompt_path = f"{sdk_test_dir}/prompt"
+    try:
+        response = humanloop_test_client.prompts.upsert(
+            path=prompt_path,
+            **test_prompt_config,
+        )
+        yield TestIdentifiers(file_id=response.id, file_path=response.path)
+        humanloop_test_client.prompts.delete(id=response.id)
+    except Exception as e:
+        pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
+
+
 @pytest.fixture(scope="function")
 def output_not_null_evaluator(
     humanloop_test_client: Humanloop, sdk_test_dir: str
diff --git a/tests/integration/test_prompts.py b/tests/integration/test_prompts.py
new file mode 100644
index 0000000..13ca80e
--- /dev/null
+++ b/tests/integration/test_prompts.py
@@ -0,0 +1,47 @@
+from humanloop.client import Humanloop
+from humanloop.requests.prompt_kernel_request import PromptKernelRequestParams
+
+from tests.integration.conftest import TestIdentifiers
+
+
+def test_prompts_call(
+    humanloop_test_client: Humanloop,
+    prompt: TestIdentifiers,
+    test_prompt_config: PromptKernelRequestParams,
+) -> None:
+    response = humanloop_test_client.prompts.call(  # type: ignore [attr-defined]
+        path=prompt.file_path,
+        prompt={**test_prompt_config},  # type: ignore [misc, arg-type, typeddict-item, dict-item, list-item]
+        inputs={"question": "What is the capital of France?"},
+    )
+    assert response is not None
+    assert response.log_id is not None
+    assert response.logs is not None
+    for log in response.logs:
+        assert log is not None
+        assert log.output is not None
+        assert "Paris" in log.output
+    assert response.prompt.path == prompt.file_path
+
+
+def test_prompts_call_stream(
+    humanloop_test_client: Humanloop,
+    prompt: TestIdentifiers,
+    test_prompt_config: PromptKernelRequestParams,
+) -> None:
+    response = humanloop_test_client.prompts.call_stream(  # type: ignore [attr-defined]
+        path=prompt.file_path,
+        prompt={**test_prompt_config},  # type: ignore [misc, arg-type, typeddict-item, dict-item, list-item]
+        inputs={"question": "What is the capital of France?"},
+    )
+
+    output = ""
+    for chunk in response:
+        assert chunk is not None
+        assert chunk.output is not None
+        assert chunk.id is not None
+        assert chunk.prompt_id is not None
+        assert chunk.version_id is not None
+        output += chunk.output
+
+    assert "Paris" in output