From b01e6f4bd858de5d8a2c8dcee3eae65aa8350035 Mon Sep 17 00:00:00 2001
From: fern-api <115122769+fern-api[bot]@users.noreply.github.com>
Date: Wed, 7 May 2025 17:14:49 +0000
Subject: [PATCH 01/39] Release 0.8.36
---
poetry.lock | 275 ++++-----
pyproject.toml | 2 +-
reference.md | 237 +++-----
src/humanloop/__init__.py | 28 +-
src/humanloop/agents/__init__.py | 12 +
src/humanloop/agents/client.py | 524 ++++--------------
src/humanloop/agents/raw_client.py | 247 +++++++--
src/humanloop/agents/requests/__init__.py | 6 +
.../requests/agent_log_request_agent.py | 6 +
.../requests/agents_call_request_agent.py | 6 +
.../agents_call_stream_request_agent.py | 6 +
src/humanloop/agents/types/__init__.py | 6 +
.../agents/types/agent_log_request_agent.py | 6 +
.../agents/types/agents_call_request_agent.py | 6 +
.../types/agents_call_stream_request_agent.py | 6 +
src/humanloop/core/client_wrapper.py | 4 +-
src/humanloop/datasets/client.py | 10 +-
src/humanloop/evaluators/client.py | 10 +-
src/humanloop/files/client.py | 48 +-
src/humanloop/files/raw_client.py | 40 +-
src/humanloop/flows/client.py | 10 +-
src/humanloop/logs/client.py | 1 -
src/humanloop/prompts/__init__.py | 12 +
src/humanloop/prompts/client.py | 78 ++-
src/humanloop/prompts/raw_client.py | 84 +--
src/humanloop/prompts/requests/__init__.py | 6 +
.../requests/prompt_log_request_prompt.py | 6 +
.../requests/prompts_call_request_prompt.py | 6 +
.../prompts_call_stream_request_prompt.py | 6 +
src/humanloop/prompts/types/__init__.py | 6 +
.../types/prompt_log_request_prompt.py | 6 +
.../types/prompts_call_request_prompt.py | 6 +
.../prompts_call_stream_request_prompt.py | 6 +
.../requests/agent_continue_response.py | 202 -------
.../agent_continue_response_tool_choice.py | 8 -
.../agent_continue_stream_response.py | 19 -
.../agent_continue_stream_response_payload.py | 8 -
src/humanloop/requests/agent_response.py | 5 +
.../requests/populate_template_response.py | 5 +
src/humanloop/requests/prompt_response.py | 5 +
src/humanloop/tools/client.py | 10 +-
src/humanloop/types/__init__.py | 4 +-
.../types/agent_continue_response.py | 224 --------
.../agent_continue_response_tool_choice.py | 8 -
.../types/agent_continue_stream_response.py | 44 --
.../agent_continue_stream_response_payload.py | 8 -
src/humanloop/types/agent_response.py | 5 +
src/humanloop/types/file_sort_by.py | 5 +
.../types/populate_template_response.py | 5 +
src/humanloop/types/project_sort_by.py | 5 -
src/humanloop/types/prompt_response.py | 5 +
.../types/version_id_response_version.py | 8 +-
.../types/version_reference_response.py | 1 +
53 files changed, 945 insertions(+), 1357 deletions(-)
create mode 100644 src/humanloop/agents/requests/agent_log_request_agent.py
create mode 100644 src/humanloop/agents/requests/agents_call_request_agent.py
create mode 100644 src/humanloop/agents/requests/agents_call_stream_request_agent.py
create mode 100644 src/humanloop/agents/types/agent_log_request_agent.py
create mode 100644 src/humanloop/agents/types/agents_call_request_agent.py
create mode 100644 src/humanloop/agents/types/agents_call_stream_request_agent.py
create mode 100644 src/humanloop/prompts/requests/prompt_log_request_prompt.py
create mode 100644 src/humanloop/prompts/requests/prompts_call_request_prompt.py
create mode 100644 src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompt_log_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompts_call_request_prompt.py
create mode 100644 src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
delete mode 100644 src/humanloop/requests/agent_continue_response.py
delete mode 100644 src/humanloop/requests/agent_continue_response_tool_choice.py
delete mode 100644 src/humanloop/requests/agent_continue_stream_response.py
delete mode 100644 src/humanloop/requests/agent_continue_stream_response_payload.py
delete mode 100644 src/humanloop/types/agent_continue_response.py
delete mode 100644 src/humanloop/types/agent_continue_response_tool_choice.py
delete mode 100644 src/humanloop/types/agent_continue_stream_response.py
delete mode 100644 src/humanloop/types/agent_continue_stream_response_payload.py
create mode 100644 src/humanloop/types/file_sort_by.py
delete mode 100644 src/humanloop/types/project_sort_by.py
diff --git a/poetry.lock b/poetry.lock
index cfe8a240..afa2f8a6 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -13,13 +13,13 @@ files = [
[[package]]
name = "anthropic"
-version = "0.50.0"
+version = "0.51.0"
description = "The official Python library for the anthropic API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "anthropic-0.50.0-py3-none-any.whl", hash = "sha256:defbd79327ca2fa61fd7b9eb2f1627dfb1f69c25d49288c52e167ddb84574f80"},
- {file = "anthropic-0.50.0.tar.gz", hash = "sha256:42175ec04ce4ff2fa37cd436710206aadff546ee99d70d974699f59b49adc66f"},
+ {file = "anthropic-0.51.0-py3-none-any.whl", hash = "sha256:b8b47d482c9aa1f81b923555cebb687c2730309a20d01be554730c8302e0f62a"},
+ {file = "anthropic-0.51.0.tar.gz", hash = "sha256:6f824451277992af079554430d5b2c8ff5bc059cc2c968cdc3f06824437da201"},
]
[package.dependencies]
@@ -89,103 +89,103 @@ files = [
[[package]]
name = "charset-normalizer"
-version = "3.4.1"
+version = "3.4.2"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
files = [
- {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"},
- {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"},
- {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-win32.whl", hash = "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-win32.whl", hash = "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e"},
+ {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"},
+ {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"},
]
[[package]]
@@ -384,13 +384,13 @@ tqdm = ["tqdm"]
[[package]]
name = "groq"
-version = "0.23.1"
+version = "0.24.0"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "groq-0.23.1-py3-none-any.whl", hash = "sha256:05fa38c3d0ad03c19c6185f98f6a73901c2a463e844fd067b79f7b05c8346946"},
- {file = "groq-0.23.1.tar.gz", hash = "sha256:952e34895f9bfb78ab479e495d77b32180262e5c42f531ce3a1722d6e5a04dfb"},
+ {file = "groq-0.24.0-py3-none-any.whl", hash = "sha256:0020e6b0b2b267263c9eb7c318deef13c12f399c6525734200b11d777b00088e"},
+ {file = "groq-0.24.0.tar.gz", hash = "sha256:e821559de8a77fb81d2585b3faec80ff923d6d64fd52339b33f6c94997d6f7f5"},
]
[package.dependencies]
@@ -412,6 +412,26 @@ files = [
{file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
]
+[[package]]
+name = "hf-xet"
+version = "1.1.0"
+description = ""
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "hf_xet-1.1.0-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:0322c42551e275fcb7949c083a54a81b2898e50787c9aa74284fcb8d2c58c12c"},
+ {file = "hf_xet-1.1.0-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:667153a0304ac2debf2af95a8ff7687186f885b493f4cd16344869af270cd110"},
+ {file = "hf_xet-1.1.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:995eeffb119636ea617b96c7d7bf3c3f5ea8727fa57974574e25d700b8532d48"},
+ {file = "hf_xet-1.1.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3aee847da362393331f515c4010d0aaa1c2669acfcca1f4b28946d6949cc0086"},
+ {file = "hf_xet-1.1.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:68c5813a6074aa36e12ef5983230e3b03148cce61e0fcdd294096493795565b4"},
+ {file = "hf_xet-1.1.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4ee9222bf9274b1c198b88a929de0b5a49349c4962d89c5b3b2f0f7f47d9761c"},
+ {file = "hf_xet-1.1.0-cp37-abi3-win_amd64.whl", hash = "sha256:73153eab9abf3d6973b21e94a67ccba5d595c3e12feb8c0bf50be02964e7f126"},
+ {file = "hf_xet-1.1.0.tar.gz", hash = "sha256:a7c2a4c2b6eee9ce0a1a367a82b60d95ba634420ef1c250addad7aa4af419cf4"},
+]
+
+[package.extras]
+tests = ["pytest"]
+
[[package]]
name = "httpcore"
version = "1.0.9"
@@ -470,18 +490,19 @@ files = [
[[package]]
name = "huggingface-hub"
-version = "0.30.2"
+version = "0.31.1"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.8.0"
files = [
- {file = "huggingface_hub-0.30.2-py3-none-any.whl", hash = "sha256:68ff05969927058cfa41df4f2155d4bb48f5f54f719dd0390103eefa9b191e28"},
- {file = "huggingface_hub-0.30.2.tar.gz", hash = "sha256:9a7897c5b6fd9dad3168a794a8998d6378210f5b9688d0dfc180b1a228dc2466"},
+ {file = "huggingface_hub-0.31.1-py3-none-any.whl", hash = "sha256:43f73124819b48b42d140cbc0d7a2e6bd15b2853b1b9d728d4d55ad1750cac5b"},
+ {file = "huggingface_hub-0.31.1.tar.gz", hash = "sha256:492bb5f545337aa9e2f59b75ef4c5f535a371e8958a6ce90af056387e67f1180"},
]
[package.dependencies]
filelock = "*"
fsspec = ">=2023.5.0"
+hf-xet = {version = ">=1.1.0,<2.0.0", markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\""}
packaging = ">=20.9"
pyyaml = ">=5.1"
requests = "*"
@@ -494,7 +515,7 @@ cli = ["InquirerPy (==0.3.4)"]
dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
hf-transfer = ["hf-transfer (>=0.1.4)"]
-hf-xet = ["hf-xet (>=0.1.4)"]
+hf-xet = ["hf-xet (>=1.1.0,<2.0.0)"]
inference = ["aiohttp"]
quality = ["libcst (==1.4.0)", "mypy (==1.5.1)", "ruff (>=0.9.0)"]
tensorflow = ["graphviz", "pydot", "tensorflow"]
@@ -873,13 +894,13 @@ files = [
[[package]]
name = "openai"
-version = "1.76.2"
+version = "1.77.0"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
files = [
- {file = "openai-1.76.2-py3-none-any.whl", hash = "sha256:9c1d9ad59e6e3bea7205eedc9ca66eeebae18d47b527e505a2b0d2fb1538e26e"},
- {file = "openai-1.76.2.tar.gz", hash = "sha256:f430c8b848775907405c6eff54621254c96f6444c593c097e0cc3a9f8fdda96f"},
+ {file = "openai-1.77.0-py3-none-any.whl", hash = "sha256:07706e91eb71631234996989a8ea991d5ee56f0744ef694c961e0824d4f39218"},
+ {file = "openai-1.77.0.tar.gz", hash = "sha256:897969f927f0068b8091b4b041d1f8175bcf124f7ea31bab418bf720971223bc"},
]
[package.dependencies]
@@ -931,13 +952,13 @@ wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-anthropic"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_anthropic-0.40.2-py3-none-any.whl", hash = "sha256:94e3474dfcb65ada10a5d83056e9e43dc0afbaae43a55bba6b7712672e28d21a"},
- {file = "opentelemetry_instrumentation_anthropic-0.40.2.tar.gz", hash = "sha256:949156556ed4d908196984fac1a8ea3d16edcf9d7395d85729a0e7712b2f818f"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.3-py3-none-any.whl", hash = "sha256:152a7968d86ade48ffb4df526129def598e8e3eeb1c4fb11a5e6a3bbc94c0fd4"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.3.tar.gz", hash = "sha256:5e40a9d3342d800180d29e028f3d1aa5db3e0ec482362a7eef054ee500fb8d4f"},
]
[package.dependencies]
@@ -948,13 +969,13 @@ opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-bedrock"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry Bedrock instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_bedrock-0.40.2-py3-none-any.whl", hash = "sha256:a12331e2cd77eb61f954acbaa50cdf31954f2b315b52da6354284ce0b83f2773"},
- {file = "opentelemetry_instrumentation_bedrock-0.40.2.tar.gz", hash = "sha256:a1d49d41d8435ba368698a884ffbd4fbda1f1325d6961b805706ee0bbbc6547f"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.3-py3-none-any.whl", hash = "sha256:cc8ea0358f57876ad12bbcbc9b1ace4b97bf9d8d5d7703a7a55135811b0b433a"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.3.tar.gz", hash = "sha256:bcb5060060d0ec25bd8c08332eadd23d57ceea1e042fed5e7a7664e5a2fcb817"},
]
[package.dependencies]
@@ -967,13 +988,13 @@ tokenizers = ">=0.13.0"
[[package]]
name = "opentelemetry-instrumentation-cohere"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_cohere-0.40.2-py3-none-any.whl", hash = "sha256:96fde68b0d8ce68f272f4c54f30178cb22cbadb196735a3943cc328891a9d508"},
- {file = "opentelemetry_instrumentation_cohere-0.40.2.tar.gz", hash = "sha256:df3cac041b0769540f2362d8280e7f0179ff1446e47fb2542f22d91822c30fc4"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.3-py3-none-any.whl", hash = "sha256:d39d058ae5cffe02908c1c242b71dc449d07df71c60d7a37159a898e38c6a15c"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.3.tar.gz", hash = "sha256:23f6f237f7cdef661549b3f7d02dc8b1c69ce0cbf3e0050c05ce3f443458fe35"},
]
[package.dependencies]
@@ -984,13 +1005,13 @@ opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-groq"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_groq-0.40.2-py3-none-any.whl", hash = "sha256:32e9220439b8356f33edbafbfd8b7f4ea063c1465ff29389abefcc93eca19530"},
- {file = "opentelemetry_instrumentation_groq-0.40.2.tar.gz", hash = "sha256:c127d089a5aec9f49ed9ba6bdbd00d67af596040a778eaef3641cd18d114ae93"},
+ {file = "opentelemetry_instrumentation_groq-0.40.3-py3-none-any.whl", hash = "sha256:3c3c324ab0b49323f268dc54b60fe06aaee04b5bac0f901a70251d4931b611bc"},
+ {file = "opentelemetry_instrumentation_groq-0.40.3.tar.gz", hash = "sha256:b246b258d28ac5af429688b9948c37ced88cba4a7f8f99f629f9b42fcbe36e47"},
]
[package.dependencies]
@@ -1001,13 +1022,13 @@ opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-openai"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_openai-0.40.2-py3-none-any.whl", hash = "sha256:62fe130f16f2933f1db75f9a14807bb08444534fd8d2e6ad4668ee8b1c3968a5"},
- {file = "opentelemetry_instrumentation_openai-0.40.2.tar.gz", hash = "sha256:61e46e7a9e3f5d7fb0cef82f1fd7bd6a26848a28ec384249875fe5622ddbf622"},
+ {file = "opentelemetry_instrumentation_openai-0.40.3-py3-none-any.whl", hash = "sha256:77e55609fef78d1a81a61aeac667b6423d19f3f3936c4a219963fba0559dae44"},
+ {file = "opentelemetry_instrumentation_openai-0.40.3.tar.gz", hash = "sha256:8e7f260f3c3e25f445281238552c80cbd724c2d61fee4ad9360a86a2e0015114"},
]
[package.dependencies]
@@ -1019,13 +1040,13 @@ tiktoken = ">=0.6.0,<1"
[[package]]
name = "opentelemetry-instrumentation-replicate"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
files = [
- {file = "opentelemetry_instrumentation_replicate-0.40.2-py3-none-any.whl", hash = "sha256:ab6234081ae9803981e8e6302524bd25fc3d0e38e9a939bee6ad15f85405ccb8"},
- {file = "opentelemetry_instrumentation_replicate-0.40.2.tar.gz", hash = "sha256:e7edf785c07e94c951f8268ff1204e00b1fcc86059b3475ac04e01b74f9785c6"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.3-py3-none-any.whl", hash = "sha256:24a3ae27137521a4f4cfa523c36be7cedf2ad57eed9fef5f1bfb2a3e2c8aee9e"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.3.tar.gz", hash = "sha256:b62dcee6b8afe6dc30ad98b18014fdd1b6851fc5bd6d0c4dc6c40bff16ce407d"},
]
[package.dependencies]
@@ -1092,13 +1113,13 @@ files = [
[[package]]
name = "orderly-set"
-version = "5.4.0"
+version = "5.4.1"
description = "Orderly set"
optional = false
python-versions = ">=3.8"
files = [
- {file = "orderly_set-5.4.0-py3-none-any.whl", hash = "sha256:f0192a7f9ae3385b587b71688353fae491d1ca45878496eb71ea118be1623639"},
- {file = "orderly_set-5.4.0.tar.gz", hash = "sha256:c8ff5ba824abe4eebcbbdd3f646ff3648ad0dd52239319d90056d8d30b6cccdd"},
+ {file = "orderly_set-5.4.1-py3-none-any.whl", hash = "sha256:b5e21d21680bd9ef456885db800c5cb4f76a03879880c0175e1b077fb166fd83"},
+ {file = "orderly_set-5.4.1.tar.gz", hash = "sha256:a1fb5a4fdc5e234e9e8d8e5c1bbdbc4540f4dfe50d12bf17c8bc5dbf1c9c878d"},
]
[[package]]
diff --git a/pyproject.toml b/pyproject.toml
index 9dddf812..73f2c3d4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -3,7 +3,7 @@ name = "humanloop"
[tool.poetry]
name = "humanloop"
-version = "0.8.36b1"
+version = "0.8.36"
description = ""
readme = "README.md"
authors = []
diff --git a/reference.md b/reference.md
index 26f361d0..beb8e609 100644
--- a/reference.md
+++ b/reference.md
@@ -202,7 +202,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptLogRequestPromptParams]`
+
+The Prompt configuration to use. Two formats are supported:
+- An object representing the details of the Prompt configuration
+- A string representing the raw contents of a .prompt file
+A new Prompt version will be created if the provided details do not match any existing version.
@@ -752,7 +757,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallStreamRequestPromptParams]`
+
+The Prompt configuration to use. Two formats are supported:
+- An object representing the details of the Prompt configuration
+- A string representing the raw contents of a .prompt file
+A new Prompt version will be created if the provided details do not match any existing version.
@@ -1026,7 +1036,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallRequestPromptParams]`
+
+The Prompt configuration to use. Two formats are supported:
+- An object representing the details of the Prompt configuration
+- A string representing the raw contents of a .prompt file
+A new Prompt version will be created if the provided details do not match any existing version.
@@ -1269,7 +1284,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Prompts by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Prompts by
@@ -3440,7 +3455,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Tools by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Tools by
@@ -4742,7 +4757,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Datasets by
@@ -6321,7 +6336,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Evaluators by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Evaluators by
@@ -8080,7 +8095,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Flows by
@@ -8858,52 +8873,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.agents.log(
- path="Banking/Teller Agent",
- agent={
- "provider": "anthropic",
- "endpoint": "chat",
- "model": "claude-3-7-sonnet-latest",
- "reasoning_effort": 1024,
- "template": [
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- "max_iterations": 3,
- "tools": [
- {
- "type": "file",
- "link": {
- "file_id": "pr_1234567890",
- "version_id": "prv_1234567890",
- },
- "on_agent_call": "continue",
- },
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- },
- ],
- },
-)
+client.agents.log()
```
@@ -9037,7 +9007,12 @@ Controls how the model uses tools. The following options are supported:
-
-**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new.
+**agent:** `typing.Optional[AgentLogRequestAgentParams]`
+
+The Agent configuration to use. Two formats are supported:
+- An object representing the details of the Agent configuration
+- A string representing the raw contents of a .agent file
+A new Agent version will be created if the provided details do not match any existing version.
@@ -9244,20 +9219,8 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.update_log(
- id="ag_1234567890",
- log_id="log_1234567890",
- messages=[
- {"role": "user", "content": "I need to withdraw $1000"},
- {
- "role": "assistant",
- "content": "Of course! Would you like to use your savings or checking account?",
- },
- ],
- output_message={
- "role": "assistant",
- "content": "I'm sorry, I can't help with that.",
- },
- log_status="complete",
+ id="id",
+ log_id="log_id",
)
```
@@ -9468,7 +9431,12 @@ Controls how the model uses tools. The following options are supported:
-
-**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new.
+**agent:** `typing.Optional[AgentsCallStreamRequestAgentParams]`
+
+The Agent configuration to use. Two formats are supported:
+- An object representing the details of the Agent configuration
+- A string representing the raw contents of a .agent file
+A new Agent version will be created if the provided details do not match any existing version.
@@ -9654,15 +9622,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.agents.call(
- path="Banking/Teller Agent",
- messages=[
- {
- "role": "user",
- "content": "I'd like to deposit $1000 to my savings account from my checking account.",
- }
- ],
-)
+client.agents.call()
```
@@ -9732,7 +9692,12 @@ Controls how the model uses tools. The following options are supported:
-
-**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new.
+**agent:** `typing.Optional[AgentsCallRequestAgentParams]`
+
+The Agent configuration to use. Two formats are supported:
+- An object representing the details of the Agent configuration
+- A string representing the raw contents of a .agent file
+A new Agent version will be created if the provided details do not match any existing version.
@@ -10018,14 +9983,8 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.continue_call(
- log_id="log_1234567890",
- messages=[
- {
- "role": "tool",
- "content": '{"type": "checking", "balance": 5200}',
- "tool_call_id": "tc_1234567890",
- }
- ],
+ log_id="log_id",
+ messages=[{"role": "user"}],
)
```
@@ -10118,14 +10077,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.agents.list(
- size=1,
-)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
+client.agents.list()
```
@@ -10173,7 +10125,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Agents by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Agents by
@@ -10241,42 +10193,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.upsert(
- path="Banking/Teller Agent",
- provider="anthropic",
- endpoint="chat",
- model="claude-3-7-sonnet-latest",
- reasoning_effort=1024,
- template=[
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- max_iterations=3,
- tools=[
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- }
- ],
- version_name="teller-agent-v1",
- version_description="Initial version",
+ model="model",
)
```
@@ -10545,8 +10462,8 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.delete_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
+ id="id",
+ version_id="version_id",
)
```
@@ -10624,10 +10541,8 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.patch_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
- name="teller-agent-v2",
- description="Updated version",
+ id="id",
+ version_id="version_id",
)
```
@@ -10724,7 +10639,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.get(
- id="ag_1234567890",
+ id="id",
)
```
@@ -10810,7 +10725,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.delete(
- id="ag_1234567890",
+ id="id",
)
```
@@ -10880,8 +10795,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.move(
- id="ag_1234567890",
- path="new directory/new name",
+ id="id",
)
```
@@ -10975,7 +10889,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.list_versions(
- id="ag_1234567890",
+ id="id",
)
```
@@ -11226,7 +11140,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.list_environments(
- id="ag_1234567890",
+ id="id",
)
```
@@ -11299,12 +11213,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.update_monitoring(
- id="ag_1234567890",
- activate=[
- {"evaluator_version_id": "ev_1234567890"},
- {"evaluator_id": "ev_2345678901", "environment_id": "env_1234567890"},
- ],
- deactivate=[{"evaluator_version_id": "ev_0987654321"}],
+ id="id",
)
```
@@ -11980,6 +11889,14 @@ client.files.list_files()
-
+**path:** `typing.Optional[str]` — Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
+
+
+
+
+-
+
**template:** `typing.Optional[bool]` — Filter to include only template files.
@@ -12004,7 +11921,7 @@ client.files.list_files()
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort files by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort files by
@@ -12020,6 +11937,14 @@ client.files.list_files()
-
+**include_raw_file_content:** `typing.Optional[bool]` — Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -12098,6 +12023,14 @@ client.files.retrieve_by_path(
-
+**include_raw_file_content:** `typing.Optional[bool]` — Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py
index 8485d75c..8e7fa495 100644
--- a/src/humanloop/__init__.py
+++ b/src/humanloop/__init__.py
@@ -93,6 +93,7 @@
FileId,
FilePath,
FileRequest,
+ FileSortBy,
FileType,
FilesToolType,
FlowKernelRequest,
@@ -150,7 +151,6 @@
PopulateTemplateResponseReasoningEffort,
PopulateTemplateResponseStop,
PopulateTemplateResponseTemplate,
- ProjectSortBy,
PromptCallLogResponse,
PromptCallResponse,
PromptCallResponseToolChoice,
@@ -204,6 +204,8 @@
from .errors import UnprocessableEntityError
from . import agents, datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
from .agents import (
+ AgentLogRequestAgent,
+ AgentLogRequestAgentParams,
AgentLogRequestToolChoice,
AgentLogRequestToolChoiceParams,
AgentRequestReasoningEffort,
@@ -214,8 +216,12 @@
AgentRequestTemplateParams,
AgentRequestToolsItem,
AgentRequestToolsItemParams,
+ AgentsCallRequestAgent,
+ AgentsCallRequestAgentParams,
AgentsCallRequestToolChoice,
AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgent,
+ AgentsCallStreamRequestAgentParams,
AgentsCallStreamRequestToolChoice,
AgentsCallStreamRequestToolChoiceParams,
)
@@ -242,6 +248,8 @@
)
from .files import RetrieveByPathFilesRetrieveByPathPostResponse, RetrieveByPathFilesRetrieveByPathPostResponseParams
from .prompts import (
+ PromptLogRequestPrompt,
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoice,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoice,
@@ -252,8 +260,12 @@
PromptRequestStopParams,
PromptRequestTemplate,
PromptRequestTemplateParams,
+ PromptsCallRequestPrompt,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoice,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPrompt,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoice,
PromptsCallStreamRequestToolChoiceParams,
)
@@ -461,6 +473,8 @@
"AgentLinkedFileResponseFile",
"AgentLinkedFileResponseFileParams",
"AgentLinkedFileResponseParams",
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
"AgentLogRequestToolChoice",
"AgentLogRequestToolChoiceParams",
"AgentLogResponse",
@@ -487,8 +501,12 @@
"AgentResponseTemplateParams",
"AgentResponseToolsItem",
"AgentResponseToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
"AgentsCallRequestToolChoice",
"AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
"AgentsCallStreamRequestToolChoice",
"AgentsCallStreamRequestToolChoiceParams",
"AnthropicRedactedThinkingContent",
@@ -622,6 +640,7 @@
"FilePathParams",
"FileRequest",
"FileRequestParams",
+ "FileSortBy",
"FileType",
"FilesToolType",
"FlowKernelRequest",
@@ -725,7 +744,6 @@
"PopulateTemplateResponseStopParams",
"PopulateTemplateResponseTemplate",
"PopulateTemplateResponseTemplateParams",
- "ProjectSortBy",
"PromptCallLogResponse",
"PromptCallLogResponseParams",
"PromptCallResponse",
@@ -742,6 +760,8 @@
"PromptKernelRequestStopParams",
"PromptKernelRequestTemplate",
"PromptKernelRequestTemplateParams",
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogResponse",
@@ -764,8 +784,12 @@
"PromptResponseStopParams",
"PromptResponseTemplate",
"PromptResponseTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
"ProviderApiKeys",
diff --git a/src/humanloop/agents/__init__.py b/src/humanloop/agents/__init__.py
index 04260714..ab2a2f9e 100644
--- a/src/humanloop/agents/__init__.py
+++ b/src/humanloop/agents/__init__.py
@@ -1,25 +1,33 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
+ AgentLogRequestAgent,
AgentLogRequestToolChoice,
AgentRequestReasoningEffort,
AgentRequestStop,
AgentRequestTemplate,
AgentRequestToolsItem,
+ AgentsCallRequestAgent,
AgentsCallRequestToolChoice,
+ AgentsCallStreamRequestAgent,
AgentsCallStreamRequestToolChoice,
)
from .requests import (
+ AgentLogRequestAgentParams,
AgentLogRequestToolChoiceParams,
AgentRequestReasoningEffortParams,
AgentRequestStopParams,
AgentRequestTemplateParams,
AgentRequestToolsItemParams,
+ AgentsCallRequestAgentParams,
AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgentParams,
AgentsCallStreamRequestToolChoiceParams,
)
__all__ = [
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
"AgentLogRequestToolChoice",
"AgentLogRequestToolChoiceParams",
"AgentRequestReasoningEffort",
@@ -30,8 +38,12 @@
"AgentRequestTemplateParams",
"AgentRequestToolsItem",
"AgentRequestToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
"AgentsCallRequestToolChoice",
"AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
"AgentsCallStreamRequestToolChoice",
"AgentsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py
index 64f3de62..3cb31092 100644
--- a/src/humanloop/agents/client.py
+++ b/src/humanloop/agents/client.py
@@ -5,29 +5,24 @@
from .raw_client import RawAgentsClient
from ..requests.chat_message import ChatMessageParams
from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
-from ..requests.agent_kernel_request import AgentKernelRequestParams
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
from ..types.create_agent_log_response import CreateAgentLogResponse
from ..types.agent_log_response import AgentLogResponse
from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.agent_call_stream_response import AgentCallStreamResponse
from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
from ..types.agent_call_response import AgentCallResponse
from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse
from ..types.agent_continue_call_response import AgentContinueCallResponse
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
-from ..core.pagination import SyncPager
-from ..types.agent_response import AgentResponse
from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
-from ..core.unchecked_base_model import construct_type
-from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
from ..types.model_endpoints import ModelEndpoints
from .requests.agent_request_template import AgentRequestTemplateParams
from ..types.template_language import TemplateLanguage
@@ -36,6 +31,7 @@
from ..requests.response_format import ResponseFormatParams
from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
from .requests.agent_request_tools_item import AgentRequestToolsItemParams
+from ..types.agent_response import AgentResponse
from ..types.list_agents import ListAgents
from ..types.file_environment_response import FileEnvironmentResponse
from ..requests.evaluator_activation_deactivation_request_activate_item import (
@@ -47,7 +43,6 @@
from ..types.agent_kernel_request import AgentKernelRequest
from ..core.client_wrapper import AsyncClientWrapper
from .raw_client import AsyncRawAgentsClient
-from ..core.pagination import AsyncPager
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -85,7 +80,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -164,8 +159,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -239,52 +237,7 @@ def log(
client = Humanloop(
api_key="YOUR_API_KEY",
)
- client.agents.log(
- path="Banking/Teller Agent",
- agent={
- "provider": "anthropic",
- "endpoint": "chat",
- "model": "claude-3-7-sonnet-latest",
- "reasoning_effort": 1024,
- "template": [
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- "max_iterations": 3,
- "tools": [
- {
- "type": "file",
- "link": {
- "file_id": "pr_1234567890",
- "version_id": "prv_1234567890",
- },
- "on_agent_call": "continue",
- },
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- },
- ],
- },
- )
+ client.agents.log()
"""
response = self._raw_client.log(
version_id=version_id,
@@ -385,20 +338,8 @@ def update_log(
api_key="YOUR_API_KEY",
)
client.agents.update_log(
- id="ag_1234567890",
- log_id="log_1234567890",
- messages=[
- {"role": "user", "content": "I need to withdraw $1000"},
- {
- "role": "assistant",
- "content": "Of course! Would you like to use your savings or checking account?",
- },
- ],
- output_message={
- "role": "assistant",
- "content": "I'm sorry, I can't help with that.",
- },
- log_status="complete",
+ id="id",
+ log_id="log_id",
)
"""
response = self._raw_client.update_log(
@@ -423,7 +364,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -482,8 +423,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -585,7 +529,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -644,8 +588,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -707,15 +654,7 @@ def call(
client = Humanloop(
api_key="YOUR_API_KEY",
)
- client.agents.call(
- path="Banking/Teller Agent",
- messages=[
- {
- "role": "user",
- "content": "I'd like to deposit $1000 to my savings account from my checking account.",
- }
- ],
- )
+ client.agents.call()
"""
response = self._raw_client.call(
version_id=version_id,
@@ -859,14 +798,8 @@ def continue_call(
api_key="YOUR_API_KEY",
)
client.agents.continue_call(
- log_id="log_1234567890",
- messages=[
- {
- "role": "tool",
- "content": '{"type": "checking", "balance": 5200}',
- "tool_call_id": "tc_1234567890",
- }
- ],
+ log_id="log_id",
+ messages=[{"role": "user"}],
)
"""
response = self._raw_client.continue_call(
@@ -885,10 +818,10 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> SyncPager[AgentResponse]:
+ ) -> PaginatedDataAgentResponse:
"""
Get a list of all Agents.
@@ -906,7 +839,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Agents by
order : typing.Optional[SortOrder]
@@ -917,7 +850,7 @@ def list(
Returns
-------
- SyncPager[AgentResponse]
+ PaginatedDataAgentResponse
Successful Response
Examples
@@ -927,64 +860,18 @@ def list(
client = Humanloop(
api_key="YOUR_API_KEY",
)
- response = client.agents.list(
- size=1,
- )
- for item in response:
- yield item
- # alternatively, you can paginate page-by-page
- for page in response.iter_pages():
- yield page
- """
- page = page if page is not None else 1
- _response = self._raw_client._client_wrapper.httpx_client.request(
- "agents",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ client.agents.list()
+ """
+ response = self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataAgentResponse,
- construct_type(
- type_=PaginatedDataAgentResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return SyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ return response.data
def upsert(
self,
@@ -1123,42 +1010,7 @@ def upsert(
api_key="YOUR_API_KEY",
)
client.agents.upsert(
- path="Banking/Teller Agent",
- provider="anthropic",
- endpoint="chat",
- model="claude-3-7-sonnet-latest",
- reasoning_effort=1024,
- template=[
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- max_iterations=3,
- tools=[
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- }
- ],
- version_name="teller-agent-v1",
- version_description="Initial version",
+ model="model",
)
"""
response = self._raw_client.upsert(
@@ -1220,8 +1072,8 @@ def delete_agent_version(
api_key="YOUR_API_KEY",
)
client.agents.delete_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
+ id="id",
+ version_id="version_id",
)
"""
response = self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
@@ -1269,10 +1121,8 @@ def patch_agent_version(
api_key="YOUR_API_KEY",
)
client.agents.patch_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
- name="teller-agent-v2",
- description="Updated version",
+ id="id",
+ version_id="version_id",
)
"""
response = self._raw_client.patch_agent_version(
@@ -1321,7 +1171,7 @@ def get(
api_key="YOUR_API_KEY",
)
client.agents.get(
- id="ag_1234567890",
+ id="id",
)
"""
response = self._raw_client.get(
@@ -1353,7 +1203,7 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
api_key="YOUR_API_KEY",
)
client.agents.delete(
- id="ag_1234567890",
+ id="id",
)
"""
response = self._raw_client.delete(id, request_options=request_options)
@@ -1401,8 +1251,7 @@ def move(
api_key="YOUR_API_KEY",
)
client.agents.move(
- id="ag_1234567890",
- path="new directory/new name",
+ id="id",
)
"""
response = self._raw_client.move(
@@ -1444,7 +1293,7 @@ def list_versions(
api_key="YOUR_API_KEY",
)
client.agents.list_versions(
- id="ag_1234567890",
+ id="id",
)
"""
response = self._raw_client.list_versions(
@@ -1564,7 +1413,7 @@ def list_environments(
api_key="YOUR_API_KEY",
)
client.agents.list_environments(
- id="ag_1234567890",
+ id="id",
)
"""
response = self._raw_client.list_environments(id, request_options=request_options)
@@ -1610,12 +1459,7 @@ def update_monitoring(
api_key="YOUR_API_KEY",
)
client.agents.update_monitoring(
- id="ag_1234567890",
- activate=[
- {"evaluator_version_id": "ev_1234567890"},
- {"evaluator_id": "ev_2345678901", "environment_id": "env_1234567890"},
- ],
- deactivate=[{"evaluator_version_id": "ev_0987654321"}],
+ id="id",
)
"""
response = self._raw_client.update_monitoring(
@@ -1630,7 +1474,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize an Agent to the .agent file format.
@@ -1656,7 +1500,8 @@ def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
@@ -1740,7 +1585,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1819,8 +1664,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1899,52 +1747,7 @@ async def log(
async def main() -> None:
- await client.agents.log(
- path="Banking/Teller Agent",
- agent={
- "provider": "anthropic",
- "endpoint": "chat",
- "model": "claude-3-7-sonnet-latest",
- "reasoning_effort": 1024,
- "template": [
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- "max_iterations": 3,
- "tools": [
- {
- "type": "file",
- "link": {
- "file_id": "pr_1234567890",
- "version_id": "prv_1234567890",
- },
- "on_agent_call": "continue",
- },
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- },
- ],
- },
- )
+ await client.agents.log()
asyncio.run(main())
@@ -2053,20 +1856,8 @@ async def update_log(
async def main() -> None:
await client.agents.update_log(
- id="ag_1234567890",
- log_id="log_1234567890",
- messages=[
- {"role": "user", "content": "I need to withdraw $1000"},
- {
- "role": "assistant",
- "content": "Of course! Would you like to use your savings or checking account?",
- },
- ],
- output_message={
- "role": "assistant",
- "content": "I'm sorry, I can't help with that.",
- },
- log_status="complete",
+ id="id",
+ log_id="log_id",
)
@@ -2094,7 +1885,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2153,8 +1944,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2265,7 +2059,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2324,8 +2118,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2392,15 +2189,7 @@ async def call(
async def main() -> None:
- await client.agents.call(
- path="Banking/Teller Agent",
- messages=[
- {
- "role": "user",
- "content": "I'd like to deposit $1000 to my savings account from my checking account.",
- }
- ],
- )
+ await client.agents.call()
asyncio.run(main())
@@ -2561,14 +2350,8 @@ async def continue_call(
async def main() -> None:
await client.agents.continue_call(
- log_id="log_1234567890",
- messages=[
- {
- "role": "tool",
- "content": '{"type": "checking", "balance": 5200}',
- "tool_call_id": "tc_1234567890",
- }
- ],
+ log_id="log_id",
+ messages=[{"role": "user"}],
)
@@ -2590,10 +2373,10 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncPager[AgentResponse]:
+ ) -> PaginatedDataAgentResponse:
"""
Get a list of all Agents.
@@ -2611,7 +2394,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Agents by
order : typing.Optional[SortOrder]
@@ -2622,7 +2405,7 @@ async def list(
Returns
-------
- AsyncPager[AgentResponse]
+ PaginatedDataAgentResponse
Successful Response
Examples
@@ -2637,67 +2420,21 @@ async def list(
async def main() -> None:
- response = await client.agents.list(
- size=1,
- )
- async for item in response:
- yield item
- # alternatively, you can paginate page-by-page
- async for page in response.iter_pages():
- yield page
+ await client.agents.list()
asyncio.run(main())
"""
- page = page if page is not None else 1
- _response = await self._raw_client._client_wrapper.httpx_client.request(
- "agents",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ response = await self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataAgentResponse,
- construct_type(
- type_=PaginatedDataAgentResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ return response.data
async def upsert(
self,
@@ -2841,42 +2578,7 @@ async def upsert(
async def main() -> None:
await client.agents.upsert(
- path="Banking/Teller Agent",
- provider="anthropic",
- endpoint="chat",
- model="claude-3-7-sonnet-latest",
- reasoning_effort=1024,
- template=[
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- max_iterations=3,
- tools=[
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- }
- ],
- version_name="teller-agent-v1",
- version_description="Initial version",
+ model="model",
)
@@ -2946,8 +2648,8 @@ async def delete_agent_version(
async def main() -> None:
await client.agents.delete_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
+ id="id",
+ version_id="version_id",
)
@@ -3003,10 +2705,8 @@ async def patch_agent_version(
async def main() -> None:
await client.agents.patch_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
- name="teller-agent-v2",
- description="Updated version",
+ id="id",
+ version_id="version_id",
)
@@ -3063,7 +2763,7 @@ async def get(
async def main() -> None:
await client.agents.get(
- id="ag_1234567890",
+ id="id",
)
@@ -3103,7 +2803,7 @@ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptio
async def main() -> None:
await client.agents.delete(
- id="ag_1234567890",
+ id="id",
)
@@ -3159,8 +2859,7 @@ async def move(
async def main() -> None:
await client.agents.move(
- id="ag_1234567890",
- path="new directory/new name",
+ id="id",
)
@@ -3210,7 +2909,7 @@ async def list_versions(
async def main() -> None:
await client.agents.list_versions(
- id="ag_1234567890",
+ id="id",
)
@@ -3354,7 +3053,7 @@ async def list_environments(
async def main() -> None:
await client.agents.list_environments(
- id="ag_1234567890",
+ id="id",
)
@@ -3408,15 +3107,7 @@ async def update_monitoring(
async def main() -> None:
await client.agents.update_monitoring(
- id="ag_1234567890",
- activate=[
- {"evaluator_version_id": "ev_1234567890"},
- {
- "evaluator_id": "ev_2345678901",
- "environment_id": "env_1234567890",
- },
- ],
- deactivate=[{"evaluator_version_id": "ev_0987654321"}],
+ id="id",
)
@@ -3434,7 +3125,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize an Agent to the .agent file format.
@@ -3460,7 +3151,8 @@ async def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py
index b13491a6..8ce4fa79 100644
--- a/src/humanloop/agents/raw_client.py
+++ b/src/humanloop/agents/raw_client.py
@@ -4,7 +4,7 @@
from ..core.client_wrapper import SyncClientWrapper
from ..requests.chat_message import ChatMessageParams
from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
-from ..requests.agent_kernel_request import AgentKernelRequestParams
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -19,14 +19,19 @@
from ..types.agent_log_response import AgentLogResponse
from ..core.jsonable_encoder import jsonable_encoder
from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.agent_call_stream_response import AgentCallStreamResponse
import httpx_sse
import contextlib
from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
from ..types.agent_call_response import AgentCallResponse
from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse
from ..types.agent_continue_call_response import AgentContinueCallResponse
+from ..types.file_sort_by import FileSortBy
+from ..types.sort_order import SortOrder
+from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
from ..types.model_endpoints import ModelEndpoints
from .requests.agent_request_template import AgentRequestTemplateParams
from ..types.template_language import TemplateLanguage
@@ -73,7 +78,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -152,8 +157,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -247,7 +255,7 @@ def log(
object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -408,7 +416,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -467,8 +475,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -540,7 +551,7 @@ def call_stream(
object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -611,7 +622,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -670,8 +681,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -743,7 +757,7 @@ def call(
object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -980,6 +994,86 @@ def continue_call(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[PaginatedDataAgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Agents by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[PaginatedDataAgentResponse]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
def upsert(
self,
*,
@@ -1770,7 +1864,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[None]:
+ ) -> HttpResponse[str]:
"""
Serialize an Agent to the .agent file format.
@@ -1796,7 +1890,8 @@ def serialize(
Returns
-------
- HttpResponse[None]
+ HttpResponse[str]
+ Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"agents/{jsonable_encoder(id)}/serialize",
@@ -1809,7 +1904,7 @@ def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return HttpResponse(response=_response, data=None)
+            return HttpResponse(response=_response, data=_response.text)
if _response.status_code == 422:
raise UnprocessableEntityError(
typing.cast(
@@ -1905,7 +2000,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1984,8 +2079,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -2079,7 +2177,7 @@ async def log(
object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -2240,7 +2338,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2299,8 +2397,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2372,7 +2473,7 @@ async def call_stream(
object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2443,7 +2544,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2502,8 +2603,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2575,7 +2679,7 @@ async def call(
object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2812,6 +2916,86 @@ async def continue_call(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[PaginatedDataAgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Agents by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[PaginatedDataAgentResponse]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
async def upsert(
self,
*,
@@ -3604,7 +3788,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[None]:
+ ) -> AsyncHttpResponse[str]:
"""
Serialize an Agent to the .agent file format.
@@ -3630,7 +3814,8 @@ async def serialize(
Returns
-------
- AsyncHttpResponse[None]
+ AsyncHttpResponse[str]
+ Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"agents/{jsonable_encoder(id)}/serialize",
@@ -3643,7 +3828,7 @@ async def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return AsyncHttpResponse(response=_response, data=None)
+            return AsyncHttpResponse(response=_response, data=_response.text)
if _response.status_code == 422:
raise UnprocessableEntityError(
typing.cast(
diff --git a/src/humanloop/agents/requests/__init__.py b/src/humanloop/agents/requests/__init__.py
index 78a8f9ec..06ce37ed 100644
--- a/src/humanloop/agents/requests/__init__.py
+++ b/src/humanloop/agents/requests/__init__.py
@@ -1,19 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_log_request_agent import AgentLogRequestAgentParams
from .agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
from .agent_request_reasoning_effort import AgentRequestReasoningEffortParams
from .agent_request_stop import AgentRequestStopParams
from .agent_request_template import AgentRequestTemplateParams
from .agent_request_tools_item import AgentRequestToolsItemParams
+from .agents_call_request_agent import AgentsCallRequestAgentParams
from .agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
__all__ = [
+ "AgentLogRequestAgentParams",
"AgentLogRequestToolChoiceParams",
"AgentRequestReasoningEffortParams",
"AgentRequestStopParams",
"AgentRequestTemplateParams",
"AgentRequestToolsItemParams",
+ "AgentsCallRequestAgentParams",
"AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgentParams",
"AgentsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/agents/requests/agent_log_request_agent.py b/src/humanloop/agents/requests/agent_log_request_agent.py
new file mode 100644
index 00000000..1c6a7987
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_log_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentLogRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agents_call_request_agent.py b/src/humanloop/agents/requests/agents_call_request_agent.py
new file mode 100644
index 00000000..5c92d02b
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agents_call_stream_request_agent.py b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..e9018a18
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallStreamRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/types/__init__.py b/src/humanloop/agents/types/__init__.py
index 73d98669..9c8a955c 100644
--- a/src/humanloop/agents/types/__init__.py
+++ b/src/humanloop/agents/types/__init__.py
@@ -1,19 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_log_request_agent import AgentLogRequestAgent
from .agent_log_request_tool_choice import AgentLogRequestToolChoice
from .agent_request_reasoning_effort import AgentRequestReasoningEffort
from .agent_request_stop import AgentRequestStop
from .agent_request_template import AgentRequestTemplate
from .agent_request_tools_item import AgentRequestToolsItem
+from .agents_call_request_agent import AgentsCallRequestAgent
from .agents_call_request_tool_choice import AgentsCallRequestToolChoice
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgent
from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoice
__all__ = [
+ "AgentLogRequestAgent",
"AgentLogRequestToolChoice",
"AgentRequestReasoningEffort",
"AgentRequestStop",
"AgentRequestTemplate",
"AgentRequestToolsItem",
+ "AgentsCallRequestAgent",
"AgentsCallRequestToolChoice",
+ "AgentsCallStreamRequestAgent",
"AgentsCallStreamRequestToolChoice",
]
diff --git a/src/humanloop/agents/types/agent_log_request_agent.py b/src/humanloop/agents/types/agent_log_request_agent.py
new file mode 100644
index 00000000..011a2b9d
--- /dev/null
+++ b/src/humanloop/agents/types/agent_log_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentLogRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_request_agent.py b/src/humanloop/agents/types/agents_call_request_agent.py
new file mode 100644
index 00000000..5f663ad3
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_stream_request_agent.py b/src/humanloop/agents/types/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..4b2654e9
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_stream_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallStreamRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py
index 71036800..94cf9db0 100644
--- a/src/humanloop/core/client_wrapper.py
+++ b/src/humanloop/core/client_wrapper.py
@@ -14,10 +14,10 @@ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[floa
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "humanloop/0.8.36b1",
+ "User-Agent": "humanloop/0.8.36",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "humanloop",
- "X-Fern-SDK-Version": "0.8.36b1",
+ "X-Fern-SDK-Version": "0.8.36",
}
headers["X-API-KEY"] = self.api_key
return headers
diff --git a/src/humanloop/datasets/client.py b/src/humanloop/datasets/client.py
index 0e70c39c..8ab493a3 100644
--- a/src/humanloop/datasets/client.py
+++ b/src/humanloop/datasets/client.py
@@ -3,7 +3,7 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
from .raw_client import RawDatasetsClient
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
from ..core.pagination import SyncPager
@@ -55,7 +55,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[DatasetResponse]:
@@ -76,7 +76,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Datasets by
order : typing.Optional[SortOrder]
@@ -857,7 +857,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[DatasetResponse]:
@@ -878,7 +878,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Datasets by
order : typing.Optional[SortOrder]
diff --git a/src/humanloop/evaluators/client.py b/src/humanloop/evaluators/client.py
index 88ec32c8..7fb13a71 100644
--- a/src/humanloop/evaluators/client.py
+++ b/src/humanloop/evaluators/client.py
@@ -10,7 +10,7 @@
from .requests.create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams
from ..core.request_options import RequestOptions
from ..types.create_evaluator_log_response import CreateEvaluatorLogResponse
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.pagination import SyncPager
from ..types.evaluator_response import EvaluatorResponse
@@ -234,7 +234,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[EvaluatorResponse]:
@@ -255,7 +255,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Evaluators by
order : typing.Optional[SortOrder]
@@ -1032,7 +1032,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[EvaluatorResponse]:
@@ -1053,7 +1053,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Evaluators by
order : typing.Optional[SortOrder]
diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py
index 693b46cb..b4fd4274 100644
--- a/src/humanloop/files/client.py
+++ b/src/humanloop/files/client.py
@@ -4,7 +4,7 @@
from ..core.client_wrapper import SyncClientWrapper
from .raw_client import RawFilesClient
from ..types.file_type import FileType
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
@@ -39,11 +39,13 @@ def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
@@ -60,6 +62,9 @@ def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -69,12 +74,15 @@ def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -96,11 +104,13 @@ def list_files(
page=page,
size=size,
name=name,
+ path=path,
template=template,
type=type,
environment=environment,
sort_by=sort_by,
order=order,
+ include_raw_file_content=include_raw_file_content,
request_options=request_options,
)
return response.data
@@ -110,6 +120,7 @@ def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> RetrieveByPathFilesRetrieveByPathPostResponse:
"""
@@ -123,6 +134,9 @@ def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -143,7 +157,10 @@ def retrieve_by_path(
)
"""
response = self._raw_client.retrieve_by_path(
- path=path, environment=environment, request_options=request_options
+ path=path,
+ environment=environment,
+ include_raw_file_content=include_raw_file_content,
+ request_options=request_options,
)
return response.data
@@ -169,11 +186,13 @@ async def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
@@ -190,6 +209,9 @@ async def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -199,12 +221,15 @@ async def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -234,11 +259,13 @@ async def main() -> None:
page=page,
size=size,
name=name,
+ path=path,
template=template,
type=type,
environment=environment,
sort_by=sort_by,
order=order,
+ include_raw_file_content=include_raw_file_content,
request_options=request_options,
)
return response.data
@@ -248,6 +275,7 @@ async def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> RetrieveByPathFilesRetrieveByPathPostResponse:
"""
@@ -261,6 +289,9 @@ async def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -289,6 +320,9 @@ async def main() -> None:
asyncio.run(main())
"""
response = await self._raw_client.retrieve_by_path(
- path=path, environment=environment, request_options=request_options
+ path=path,
+ environment=environment,
+ include_raw_file_content=include_raw_file_content,
+ request_options=request_options,
)
return response.data
diff --git a/src/humanloop/files/raw_client.py b/src/humanloop/files/raw_client.py
index 01b48e03..02f371ac 100644
--- a/src/humanloop/files/raw_client.py
+++ b/src/humanloop/files/raw_client.py
@@ -3,7 +3,7 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
from ..types.file_type import FileType
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
from ..core.http_response import HttpResponse
@@ -33,11 +33,13 @@ def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
@@ -56,6 +58,9 @@ def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -65,12 +70,15 @@ def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -86,11 +94,13 @@ def list_files(
"page": page,
"size": size,
"name": name,
+ "path": path,
"template": template,
"type": type,
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_raw_file_content": include_raw_file_content,
},
request_options=request_options,
)
@@ -124,6 +134,7 @@ def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]:
"""
@@ -137,6 +148,9 @@ def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -150,6 +164,7 @@ def retrieve_by_path(
method="POST",
params={
"environment": environment,
+ "include_raw_file_content": include_raw_file_content,
},
json={
"path": path,
@@ -196,11 +211,13 @@ async def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
@@ -219,6 +236,9 @@ async def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -228,12 +248,15 @@ async def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -249,11 +272,13 @@ async def list_files(
"page": page,
"size": size,
"name": name,
+ "path": path,
"template": template,
"type": type,
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_raw_file_content": include_raw_file_content,
},
request_options=request_options,
)
@@ -287,6 +312,7 @@ async def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]:
"""
@@ -300,6 +326,9 @@ async def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -313,6 +342,7 @@ async def retrieve_by_path(
method="POST",
params={
"environment": environment,
+ "include_raw_file_content": include_raw_file_content,
},
json={
"path": path,
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index bcb9491c..9ba82a6a 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -11,7 +11,7 @@
from ..types.create_flow_log_response import CreateFlowLogResponse
from ..types.flow_log_response import FlowLogResponse
from ..types.flow_response import FlowResponse
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.pagination import SyncPager
from ..types.paginated_data_flow_response import PaginatedDataFlowResponse
@@ -469,7 +469,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[FlowResponse]:
@@ -490,7 +490,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Flows by
order : typing.Optional[SortOrder]
@@ -1418,7 +1418,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[FlowResponse]:
@@ -1439,7 +1439,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Flows by
order : typing.Optional[SortOrder]
diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py
index 8733ed37..26cb2465 100644
--- a/src/humanloop/logs/client.py
+++ b/src/humanloop/logs/client.py
@@ -3,7 +3,6 @@
from ..core.client_wrapper import SyncClientWrapper
from .raw_client import RawLogsClient
import typing
-from ..types.version_status import VersionStatus
import datetime as dt
from ..core.request_options import RequestOptions
from ..core.pagination import SyncPager
diff --git a/src/humanloop/prompts/__init__.py b/src/humanloop/prompts/__init__.py
index ae141d57..557dcc5c 100644
--- a/src/humanloop/prompts/__init__.py
+++ b/src/humanloop/prompts/__init__.py
@@ -1,25 +1,33 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
+ PromptLogRequestPrompt,
PromptLogRequestToolChoice,
PromptLogUpdateRequestToolChoice,
PromptRequestReasoningEffort,
PromptRequestStop,
PromptRequestTemplate,
+ PromptsCallRequestPrompt,
PromptsCallRequestToolChoice,
+ PromptsCallStreamRequestPrompt,
PromptsCallStreamRequestToolChoice,
)
from .requests import (
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoiceParams,
PromptRequestReasoningEffortParams,
PromptRequestStopParams,
PromptRequestTemplateParams,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoiceParams,
)
__all__ = [
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoice",
@@ -30,8 +38,12 @@
"PromptRequestStopParams",
"PromptRequestTemplate",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py
index d5de327b..a81e8411 100644
--- a/src/humanloop/prompts/client.py
+++ b/src/humanloop/prompts/client.py
@@ -5,7 +5,7 @@
from .raw_client import RawPromptsClient
from ..requests.chat_message import ChatMessageParams
from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -13,11 +13,13 @@
from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
from ..types.log_response import LogResponse
from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.prompt_call_stream_response import PromptCallStreamResponse
from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
from ..types.prompt_call_response import PromptCallResponse
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.pagination import SyncPager
from ..types.prompt_response import PromptResponse
@@ -85,7 +87,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -166,8 +168,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -480,7 +485,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -538,8 +543,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -649,7 +657,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -707,8 +715,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -843,7 +854,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[PromptResponse]:
@@ -864,7 +875,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Prompts by
order : typing.Optional[SortOrder]
@@ -1607,7 +1618,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize a Prompt to the .prompt file format.
@@ -1633,7 +1644,8 @@ def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
@@ -1719,7 +1731,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1800,8 +1812,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -2131,7 +2146,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2189,8 +2204,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2309,7 +2327,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2367,8 +2385,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2511,7 +2532,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[PromptResponse]:
@@ -2532,7 +2553,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Prompts by
order : typing.Optional[SortOrder]
@@ -3379,7 +3400,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize a Prompt to the .prompt file format.
@@ -3405,7 +3426,8 @@ async def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
index 2b907d91..d7346742 100644
--- a/src/humanloop/prompts/raw_client.py
+++ b/src/humanloop/prompts/raw_client.py
@@ -4,7 +4,7 @@
from ..core.client_wrapper import SyncClientWrapper
from ..requests.chat_message import ChatMessageParams
from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -20,11 +20,13 @@
from ..types.log_response import LogResponse
from ..core.jsonable_encoder import jsonable_encoder
from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.prompt_call_stream_response import PromptCallStreamResponse
import httpx_sse
import contextlib
from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
from ..types.prompt_call_response import PromptCallResponse
from ..types.model_endpoints import ModelEndpoints
from .requests.prompt_request_template import PromptRequestTemplateParams
@@ -73,7 +75,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -154,8 +156,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -249,7 +254,7 @@ def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -496,7 +501,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -554,8 +559,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -633,7 +641,7 @@ def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -706,7 +714,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -764,8 +772,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -843,7 +854,7 @@ def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -1754,7 +1765,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[None]:
+ ) -> HttpResponse[str]:
"""
Serialize a Prompt to the .prompt file format.
@@ -1780,7 +1791,8 @@ def serialize(
Returns
-------
- HttpResponse[None]
+ HttpResponse[str]
+ Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"prompts/{jsonable_encoder(id)}/serialize",
@@ -1793,7 +1805,7 @@ def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return HttpResponse(response=_response, data=None)
+ return _response.text # type: ignore
if _response.status_code == 422:
raise UnprocessableEntityError(
typing.cast(
@@ -1889,7 +1901,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1970,8 +1982,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -2065,7 +2080,7 @@ async def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -2312,7 +2327,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2370,8 +2385,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2449,7 +2467,7 @@ async def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2522,7 +2540,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2580,8 +2598,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2659,7 +2680,7 @@ async def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -3572,7 +3593,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[None]:
+ ) -> AsyncHttpResponse[str]:
"""
Serialize a Prompt to the .prompt file format.
@@ -3598,7 +3619,8 @@ async def serialize(
Returns
-------
- AsyncHttpResponse[None]
+ AsyncHttpResponse[str]
+ Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"prompts/{jsonable_encoder(id)}/serialize",
@@ -3611,7 +3633,7 @@ async def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return AsyncHttpResponse(response=_response, data=None)
+ return _response.text # type: ignore
if _response.status_code == 422:
raise UnprocessableEntityError(
typing.cast(
diff --git a/src/humanloop/prompts/requests/__init__.py b/src/humanloop/prompts/requests/__init__.py
index 3971e252..ae1cfb6a 100644
--- a/src/humanloop/prompts/requests/__init__.py
+++ b/src/humanloop/prompts/requests/__init__.py
@@ -1,19 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .prompt_log_request_prompt import PromptLogRequestPromptParams
from .prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
from .prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from .prompt_request_stop import PromptRequestStopParams
from .prompt_request_template import PromptRequestTemplateParams
+from .prompts_call_request_prompt import PromptsCallRequestPromptParams
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
__all__ = [
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoiceParams",
"PromptRequestReasoningEffortParams",
"PromptRequestStopParams",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/requests/prompt_log_request_prompt.py b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
new file mode 100644
index 00000000..8473bb42
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptLogRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/requests/prompts_call_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
new file mode 100644
index 00000000..7a236235
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptsCallRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..9524425b
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptsCallStreamRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/types/__init__.py b/src/humanloop/prompts/types/__init__.py
index 1b849e7d..40326bce 100644
--- a/src/humanloop/prompts/types/__init__.py
+++ b/src/humanloop/prompts/types/__init__.py
@@ -1,19 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .prompt_log_request_prompt import PromptLogRequestPrompt
from .prompt_log_request_tool_choice import PromptLogRequestToolChoice
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoice
from .prompt_request_reasoning_effort import PromptRequestReasoningEffort
from .prompt_request_stop import PromptRequestStop
from .prompt_request_template import PromptRequestTemplate
+from .prompts_call_request_prompt import PromptsCallRequestPrompt
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoice
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPrompt
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoice
__all__ = [
+ "PromptLogRequestPrompt",
"PromptLogRequestToolChoice",
"PromptLogUpdateRequestToolChoice",
"PromptRequestReasoningEffort",
"PromptRequestStop",
"PromptRequestTemplate",
+ "PromptsCallRequestPrompt",
"PromptsCallRequestToolChoice",
+ "PromptsCallStreamRequestPrompt",
"PromptsCallStreamRequestToolChoice",
]
diff --git a/src/humanloop/prompts/types/prompt_log_request_prompt.py b/src/humanloop/prompts/types/prompt_log_request_prompt.py
new file mode 100644
index 00000000..4a0791dc
--- /dev/null
+++ b/src/humanloop/prompts/types/prompt_log_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptLogRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompts_call_request_prompt.py b/src/humanloop/prompts/types/prompts_call_request_prompt.py
new file mode 100644
index 00000000..78a9f5a1
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptsCallRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..71376823
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptsCallStreamRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/requests/agent_continue_response.py b/src/humanloop/requests/agent_continue_response.py
deleted file mode 100644
index 8300667b..00000000
--- a/src/humanloop/requests/agent_continue_response.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing_extensions
-import typing_extensions
-from .chat_message import ChatMessageParams
-import typing
-from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams
-from .agent_response import AgentResponseParams
-import datetime as dt
-from ..types.log_status import LogStatus
-from .evaluator_log_response import EvaluatorLogResponseParams
-from .log_response import LogResponseParams
-
-
-class AgentContinueResponseParams(typing_extensions.TypedDict):
- """
- Response model for continuing an Agent call.
- """
-
- output_message: typing_extensions.NotRequired[ChatMessageParams]
- """
- The message returned by the provider.
- """
-
- prompt_tokens: typing_extensions.NotRequired[int]
- """
- Number of tokens in the prompt used to generate the output.
- """
-
- reasoning_tokens: typing_extensions.NotRequired[int]
- """
- Number of reasoning tokens used to generate the output.
- """
-
- output_tokens: typing_extensions.NotRequired[int]
- """
- Number of tokens in the output generated by the model.
- """
-
- prompt_cost: typing_extensions.NotRequired[float]
- """
- Cost in dollars associated to the tokens in the prompt.
- """
-
- output_cost: typing_extensions.NotRequired[float]
- """
- Cost in dollars associated to the tokens in the output.
- """
-
- finish_reason: typing_extensions.NotRequired[str]
- """
- Reason the generation finished.
- """
-
- messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
- """
- The messages passed to the to provider chat endpoint.
- """
-
- tool_choice: typing_extensions.NotRequired[AgentContinueResponseToolChoiceParams]
- """
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- """
-
- agent: AgentResponseParams
- """
- Agent that generated the Log.
- """
-
- start_time: typing_extensions.NotRequired[dt.datetime]
- """
- When the logged event started.
- """
-
- end_time: typing_extensions.NotRequired[dt.datetime]
- """
- When the logged event ended.
- """
-
- output: typing_extensions.NotRequired[str]
- """
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- """
-
- created_at: typing_extensions.NotRequired[dt.datetime]
- """
- User defined timestamp for when the log was created.
- """
-
- error: typing_extensions.NotRequired[str]
- """
- Error message if the log is an error.
- """
-
- provider_latency: typing_extensions.NotRequired[float]
- """
- Duration of the logged event in seconds.
- """
-
- stdout: typing_extensions.NotRequired[str]
- """
- Captured log and debug statements.
- """
-
- provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- Raw request sent to provider.
- """
-
- provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- Raw response received the provider.
- """
-
- inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- The inputs passed to the prompt template.
- """
-
- source: typing_extensions.NotRequired[str]
- """
- Identifies where the model was called from.
- """
-
- metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- Any additional metadata to record.
- """
-
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
- """
-
- source_datapoint_id: typing_extensions.NotRequired[str]
- """
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- """
-
- trace_parent_id: typing_extensions.NotRequired[str]
- """
- The ID of the parent Log to nest this Log under in a Trace.
- """
-
- batches: typing_extensions.NotRequired[typing.Sequence[str]]
- """
- Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
- """
-
- user: typing_extensions.NotRequired[str]
- """
- End-user ID related to the Log.
- """
-
- environment: typing_extensions.NotRequired[str]
- """
- The name of the Environment the Log is associated to.
- """
-
- save: typing_extensions.NotRequired[bool]
- """
- Whether the request/response payloads will be stored on Humanloop.
- """
-
- log_id: typing_extensions.NotRequired[str]
- """
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- """
-
- id: str
- """
- Unique identifier for the Log.
- """
-
- evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
- """
- List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
- """
-
- trace_flow_id: typing_extensions.NotRequired[str]
- """
- Identifier for the Flow that the Trace belongs to.
- """
-
- trace_id: typing_extensions.NotRequired[str]
- """
- Identifier for the Trace that the Log belongs to.
- """
-
- trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
- """
- Logs nested under this Log in the Trace.
- """
-
- previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
- """
- The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
- """
diff --git a/src/humanloop/requests/agent_continue_response_tool_choice.py b/src/humanloop/requests/agent_continue_response_tool_choice.py
deleted file mode 100644
index 24b044cc..00000000
--- a/src/humanloop/requests/agent_continue_response_tool_choice.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .tool_choice import ToolChoiceParams
-
-AgentContinueResponseToolChoiceParams = typing.Union[
- typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
-]
diff --git a/src/humanloop/requests/agent_continue_stream_response.py b/src/humanloop/requests/agent_continue_stream_response.py
deleted file mode 100644
index 1038e000..00000000
--- a/src/humanloop/requests/agent_continue_stream_response.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing_extensions
-import typing_extensions
-from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams
-from ..types.event_type import EventType
-import datetime as dt
-
-
-class AgentContinueStreamResponseParams(typing_extensions.TypedDict):
- """
- Response model for continuing an Agent call in streaming mode.
- """
-
- log_id: str
- message: str
- payload: typing_extensions.NotRequired[AgentContinueStreamResponsePayloadParams]
- type: EventType
- created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_continue_stream_response_payload.py b/src/humanloop/requests/agent_continue_stream_response_payload.py
deleted file mode 100644
index ddd74c10..00000000
--- a/src/humanloop/requests/agent_continue_stream_response_payload.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .log_stream_response import LogStreamResponseParams
-from .log_response import LogResponseParams
-from .tool_call import ToolCallParams
-
-AgentContinueStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
diff --git a/src/humanloop/requests/agent_response.py b/src/humanloop/requests/agent_response.py
index 047904a7..08303ec0 100644
--- a/src/humanloop/requests/agent_response.py
+++ b/src/humanloop/requests/agent_response.py
@@ -235,3 +235,8 @@ class AgentResponseParams(typing_extensions.TypedDict):
"""
Aggregation of Evaluator results for the Agent Version.
"""
+
+ raw_file_content: typing_extensions.NotRequired[str]
+ """
+ The raw content of the Agent. Corresponds to the .agent file.
+ """
diff --git a/src/humanloop/requests/populate_template_response.py b/src/humanloop/requests/populate_template_response.py
index 491cacd3..09f68797 100644
--- a/src/humanloop/requests/populate_template_response.py
+++ b/src/humanloop/requests/populate_template_response.py
@@ -218,6 +218,11 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
Aggregation of Evaluator results for the Prompt Version.
"""
+ raw_file_content: typing_extensions.NotRequired[str]
+ """
+ The raw content of the Prompt. Corresponds to the .prompt file.
+ """
+
populated_template: typing_extensions.NotRequired[PopulateTemplateResponsePopulatedTemplateParams]
"""
The template populated with the input values you provided in the request. Returns None if no template exists.
diff --git a/src/humanloop/requests/prompt_response.py b/src/humanloop/requests/prompt_response.py
index b6ff03df..1918f87b 100644
--- a/src/humanloop/requests/prompt_response.py
+++ b/src/humanloop/requests/prompt_response.py
@@ -220,3 +220,8 @@ class PromptResponseParams(typing_extensions.TypedDict):
"""
Aggregation of Evaluator results for the Prompt Version.
"""
+
+ raw_file_content: typing_extensions.NotRequired[str]
+ """
+ The raw content of the Prompt. Corresponds to the .prompt file.
+ """
diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py
index ea6b14a2..f58fc3d8 100644
--- a/src/humanloop/tools/client.py
+++ b/src/humanloop/tools/client.py
@@ -10,7 +10,7 @@
from ..types.tool_call_response import ToolCallResponse
from ..types.create_tool_log_response import CreateToolLogResponse
from ..types.log_response import LogResponse
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.pagination import SyncPager
from ..types.tool_response import ToolResponse
@@ -479,7 +479,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[ToolResponse]:
@@ -500,7 +500,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Tools by
order : typing.Optional[SortOrder]
@@ -1666,7 +1666,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[ToolResponse]:
@@ -1687,7 +1687,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Tools by
order : typing.Optional[SortOrder]
diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py
index 7c1d30f5..7d863134 100644
--- a/src/humanloop/types/__init__.py
+++ b/src/humanloop/types/__init__.py
@@ -94,6 +94,7 @@
from .file_id import FileId
from .file_path import FilePath
from .file_request import FileRequest
+from .file_sort_by import FileSortBy
from .file_type import FileType
from .files_tool_type import FilesToolType
from .flow_kernel_request import FlowKernelRequest
@@ -155,7 +156,6 @@
from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
from .populate_template_response_stop import PopulateTemplateResponseStop
from .populate_template_response_template import PopulateTemplateResponseTemplate
-from .project_sort_by import ProjectSortBy
from .prompt_call_log_response import PromptCallLogResponse
from .prompt_call_response import PromptCallResponse
from .prompt_call_response_tool_choice import PromptCallResponseToolChoice
@@ -299,6 +299,7 @@
"FileId",
"FilePath",
"FileRequest",
+ "FileSortBy",
"FileType",
"FilesToolType",
"FlowKernelRequest",
@@ -356,7 +357,6 @@
"PopulateTemplateResponseReasoningEffort",
"PopulateTemplateResponseStop",
"PopulateTemplateResponseTemplate",
- "ProjectSortBy",
"PromptCallLogResponse",
"PromptCallResponse",
"PromptCallResponseToolChoice",
diff --git a/src/humanloop/types/agent_continue_response.py b/src/humanloop/types/agent_continue_response.py
deleted file mode 100644
index 0bbd7858..00000000
--- a/src/humanloop/types/agent_continue_response.py
+++ /dev/null
@@ -1,224 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-from .agent_log_response import AgentLogResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .flow_log_response import FlowLogResponse
-from .prompt_log_response import PromptLogResponse
-from .tool_log_response import ToolLogResponse
-import typing
-from .chat_message import ChatMessage
-import pydantic
-from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice
-import datetime as dt
-from .log_status import LogStatus
-from .log_response import LogResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class AgentContinueResponse(UncheckedBaseModel):
- """
- Response model for continuing an Agent call.
- """
-
- output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
- """
- The message returned by the provider.
- """
-
- prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
- """
- Number of tokens in the prompt used to generate the output.
- """
-
- reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
- """
- Number of reasoning tokens used to generate the output.
- """
-
- output_tokens: typing.Optional[int] = pydantic.Field(default=None)
- """
- Number of tokens in the output generated by the model.
- """
-
- prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
- """
- Cost in dollars associated to the tokens in the prompt.
- """
-
- output_cost: typing.Optional[float] = pydantic.Field(default=None)
- """
- Cost in dollars associated to the tokens in the output.
- """
-
- finish_reason: typing.Optional[str] = pydantic.Field(default=None)
- """
- Reason the generation finished.
- """
-
- messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
- """
- The messages passed to the to provider chat endpoint.
- """
-
- tool_choice: typing.Optional[AgentContinueResponseToolChoice] = pydantic.Field(default=None)
- """
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- """
-
- agent: AgentResponse = pydantic.Field()
- """
- Agent that generated the Log.
- """
-
- start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- When the logged event started.
- """
-
- end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- When the logged event ended.
- """
-
- output: typing.Optional[str] = pydantic.Field(default=None)
- """
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- """
-
- created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- User defined timestamp for when the log was created.
- """
-
- error: typing.Optional[str] = pydantic.Field(default=None)
- """
- Error message if the log is an error.
- """
-
- provider_latency: typing.Optional[float] = pydantic.Field(default=None)
- """
- Duration of the logged event in seconds.
- """
-
- stdout: typing.Optional[str] = pydantic.Field(default=None)
- """
- Captured log and debug statements.
- """
-
- provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Raw request sent to provider.
- """
-
- provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Raw response received the provider.
- """
-
- inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- The inputs passed to the prompt template.
- """
-
- source: typing.Optional[str] = pydantic.Field(default=None)
- """
- Identifies where the model was called from.
- """
-
- metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Any additional metadata to record.
- """
-
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
- """
-
- source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- """
-
- trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- The ID of the parent Log to nest this Log under in a Trace.
- """
-
- batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
- """
-
- user: typing.Optional[str] = pydantic.Field(default=None)
- """
- End-user ID related to the Log.
- """
-
- environment: typing.Optional[str] = pydantic.Field(default=None)
- """
- The name of the Environment the Log is associated to.
- """
-
- save: typing.Optional[bool] = pydantic.Field(default=None)
- """
- Whether the request/response payloads will be stored on Humanloop.
- """
-
- log_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- """
-
- id: str = pydantic.Field()
- """
- Unique identifier for the Log.
- """
-
- evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
- """
- List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
- """
-
- trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Identifier for the Flow that the Trace belongs to.
- """
-
- trace_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Identifier for the Trace that the Log belongs to.
- """
-
- trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
- """
- Logs nested under this Log in the Trace.
- """
-
- previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
- """
- The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_response_tool_choice.py b/src/humanloop/types/agent_continue_response_tool_choice.py
deleted file mode 100644
index 20f3fb75..00000000
--- a/src/humanloop/types/agent_continue_response_tool_choice.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .tool_choice import ToolChoice
-
-AgentContinueResponseToolChoice = typing.Union[
- typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
-]
diff --git a/src/humanloop/types/agent_continue_stream_response.py b/src/humanloop/types/agent_continue_stream_response.py
deleted file mode 100644
index ff7a0fac..00000000
--- a/src/humanloop/types/agent_continue_stream_response.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_log_response import AgentLogResponse
-from .agent_response import AgentResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_log_response import FlowLogResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_log_response import PromptLogResponse
-from .prompt_response import PromptResponse
-from .tool_log_response import ToolLogResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-import typing
-from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload
-from .event_type import EventType
-import datetime as dt
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
-
-
-class AgentContinueStreamResponse(UncheckedBaseModel):
- """
- Response model for continuing an Agent call in streaming mode.
- """
-
- log_id: str
- message: str
- payload: typing.Optional[AgentContinueStreamResponsePayload] = None
- type: EventType
- created_at: dt.datetime
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_stream_response_payload.py b/src/humanloop/types/agent_continue_stream_response_payload.py
deleted file mode 100644
index 0e5f8a58..00000000
--- a/src/humanloop/types/agent_continue_stream_response_payload.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .log_stream_response import LogStreamResponse
-from .log_response import LogResponse
-from .tool_call import ToolCall
-
-AgentContinueStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
diff --git a/src/humanloop/types/agent_response.py b/src/humanloop/types/agent_response.py
index e58aaeba..fa72a361 100644
--- a/src/humanloop/types/agent_response.py
+++ b/src/humanloop/types/agent_response.py
@@ -237,6 +237,11 @@ class AgentResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Agent Version.
"""
+ raw_file_content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The raw content of the Agent. Corresponds to the .agent file.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/humanloop/types/file_sort_by.py b/src/humanloop/types/file_sort_by.py
new file mode 100644
index 00000000..b3135c3b
--- /dev/null
+++ b/src/humanloop/types/file_sort_by.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+FileSortBy = typing.Union[typing.Literal["created_at", "updated_at", "name"], typing.Any]
diff --git a/src/humanloop/types/populate_template_response.py b/src/humanloop/types/populate_template_response.py
index efcd1d0c..4dad70b1 100644
--- a/src/humanloop/types/populate_template_response.py
+++ b/src/humanloop/types/populate_template_response.py
@@ -231,6 +231,11 @@ class PopulateTemplateResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Prompt Version.
"""
+ raw_file_content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The raw content of the Prompt. Corresponds to the .prompt file.
+ """
+
populated_template: typing.Optional[PopulateTemplateResponsePopulatedTemplate] = pydantic.Field(default=None)
"""
The template populated with the input values you provided in the request. Returns None if no template exists.
diff --git a/src/humanloop/types/project_sort_by.py b/src/humanloop/types/project_sort_by.py
deleted file mode 100644
index b8265b56..00000000
--- a/src/humanloop/types/project_sort_by.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ProjectSortBy = typing.Union[typing.Literal["created_at", "updated_at", "name"], typing.Any]
diff --git a/src/humanloop/types/prompt_response.py b/src/humanloop/types/prompt_response.py
index 5d6ff870..59f00d1c 100644
--- a/src/humanloop/types/prompt_response.py
+++ b/src/humanloop/types/prompt_response.py
@@ -223,6 +223,11 @@ class PromptResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Prompt Version.
"""
+ raw_file_content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The raw content of the Prompt. Corresponds to the .prompt file.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/humanloop/types/version_id_response_version.py b/src/humanloop/types/version_id_response_version.py
index 1b74199f..b1cbd45d 100644
--- a/src/humanloop/types/version_id_response_version.py
+++ b/src/humanloop/types/version_id_response_version.py
@@ -3,6 +3,7 @@
from __future__ import annotations
import typing
from .dataset_response import DatasetResponse
+import typing
if typing.TYPE_CHECKING:
from .prompt_response import PromptResponse
@@ -11,10 +12,5 @@
from .flow_response import FlowResponse
from .agent_response import AgentResponse
VersionIdResponseVersion = typing.Union[
- "PromptResponse",
- "ToolResponse",
- DatasetResponse,
- "EvaluatorResponse",
- "FlowResponse",
- "AgentResponse",
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
]
diff --git a/src/humanloop/types/version_reference_response.py b/src/humanloop/types/version_reference_response.py
index a6a7783c..399361c8 100644
--- a/src/humanloop/types/version_reference_response.py
+++ b/src/humanloop/types/version_reference_response.py
@@ -2,6 +2,7 @@
from __future__ import annotations
import typing
+import typing
if typing.TYPE_CHECKING:
from .version_deployment_response import VersionDeploymentResponse
From e3a524edd6299f4b4bc266df9b9a16a8939a115b Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Wed, 7 May 2025 18:24:22 +0100
Subject: [PATCH 02/39] Merge custom code
---
.fernignore | 2 +
pyproject.toml | 22 +-
src/humanloop/cli/__init__.py | 0
src/humanloop/cli/__main__.py | 256 +++++++++++++++++++
src/humanloop/cli/progress.py | 120 +++++++++
src/humanloop/client.py | 87 ++++++-
src/humanloop/overload.py | 108 +++++++-
src/humanloop/sync/__init__.py | 3 +
src/humanloop/sync/metadata_handler.py | 125 ++++++++++
src/humanloop/sync/sync_client.py | 327 +++++++++++++++++++++++++
tests/conftest.py | 14 +-
tests/sync/test_sync.py | 267 ++++++++++++++++++++
12 files changed, 1311 insertions(+), 20 deletions(-)
create mode 100644 src/humanloop/cli/__init__.py
create mode 100644 src/humanloop/cli/__main__.py
create mode 100644 src/humanloop/cli/progress.py
create mode 100644 src/humanloop/sync/__init__.py
create mode 100644 src/humanloop/sync/metadata_handler.py
create mode 100644 src/humanloop/sync/sync_client.py
create mode 100644 tests/sync/test_sync.py
diff --git a/.fernignore b/.fernignore
index 112f779b..d52ed17e 100644
--- a/.fernignore
+++ b/.fernignore
@@ -13,6 +13,8 @@ mypy.ini
README.md
src/humanloop/decorators
src/humanloop/otel
+src/humanloop/sync
+src/humanloop/cli/
## Tests
diff --git a/pyproject.toml b/pyproject.toml
index 73f2c3d4..8dc69a73 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,13 +1,20 @@
+# This section is used by PyPI and follows PEP 621 for package metadata
[project]
name = "humanloop"
+description = "The Humanloop Python Library"
+authors = []
+# This section is used by Poetry for development and building
+# The metadata here is used during development but not published to PyPI
[tool.poetry]
name = "humanloop"
-version = "0.8.36"
-description = ""
+version = "0.8.36b1"
+description = "Humanloop Python SDK"
readme = "README.md"
authors = []
-keywords = []
+packages = [
+ { include = "humanloop", from = "src" },
+]
classifiers = [
"Intended Audience :: Developers",
@@ -26,9 +33,6 @@ classifiers = [
"Topic :: Software Development :: Libraries :: Python Modules",
"Typing :: Typed"
]
-packages = [
- { include = "humanloop", from = "src"}
-]
[project.urls]
Repository = 'https://github.com/humanloop/humanloop-python'
@@ -53,8 +57,9 @@ protobuf = ">=5.29.3"
pydantic = ">= 1.9.2"
pydantic-core = "^2.18.2"
typing_extensions = ">= 4.0.0"
+click = "^8.0.0"
-[tool.poetry.dev-dependencies]
+[tool.poetry.group.dev.dependencies]
mypy = "1.0.1"
pytest = "^7.4.0"
pytest-asyncio = "^0.23.5"
@@ -86,7 +91,10 @@ plugins = ["pydantic.mypy"]
[tool.ruff]
line-length = 120
+[tool.poetry.scripts]
+humanloop = "humanloop.cli.__main__:cli"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
+
diff --git a/src/humanloop/cli/__init__.py b/src/humanloop/cli/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
new file mode 100644
index 00000000..ae5d1b43
--- /dev/null
+++ b/src/humanloop/cli/__main__.py
@@ -0,0 +1,256 @@
+import click
+import logging
+from pathlib import Path
+from typing import Optional, Callable
+from functools import wraps
+from dotenv import load_dotenv, find_dotenv
+import os
+import sys
+from humanloop import Humanloop
+from humanloop.sync.sync_client import SyncClient
+from datetime import datetime
+from humanloop.cli.progress import progress_context
+
+# Set up logging
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO) # Set back to INFO level
+console_handler = logging.StreamHandler()
+formatter = logging.Formatter("%(message)s") # Simplified formatter
+console_handler.setFormatter(formatter)
+if not logger.hasHandlers():
+ logger.addHandler(console_handler)
+
+# Color constants
+SUCCESS_COLOR = "green"
+ERROR_COLOR = "red"
+INFO_COLOR = "blue"
+WARNING_COLOR = "yellow"
+
+MAX_FILES_TO_DISPLAY = 10
+
def get_client(api_key: Optional[str] = None, env_file: Optional[str] = None, base_url: Optional[str] = None) -> Humanloop:
    """Build a Humanloop client, resolving the API key from args or the environment.

    Resolution order for the API key:
    1. The explicit ``api_key`` argument.
    2. ``HUMANLOOP_API_KEY`` loaded from ``env_file`` if given, otherwise from a
       ``.env`` discovered by python-dotenv, otherwise the process environment.

    :param api_key: Explicit API key; skips .env loading when provided.
    :param env_file: Optional path to a specific .env file.
    :param base_url: Optional API base URL override (hidden option, used for testing).
    :raises click.ClickException: If no API key can be resolved.
    """
    if not api_key:
        if env_file:
            load_dotenv(env_file)
        else:
            env_path = find_dotenv()
            # find_dotenv() returns "" when no .env is found; a bare
            # load_dotenv() then performs its own default discovery, which
            # already covers the former explicit os.path.exists(".env") case.
            if env_path:
                load_dotenv(env_path)
            else:
                load_dotenv()

        api_key = os.getenv("HUMANLOOP_API_KEY")
        if not api_key:
            raise click.ClickException(
                click.style(
                    "No API key found. Set HUMANLOOP_API_KEY in .env file or environment, or use --api-key",
                    fg=ERROR_COLOR,
                )
            )

    return Humanloop(api_key=api_key, base_url=base_url)
+
def common_options(f: Callable) -> Callable:
    """Attach the CLI options shared by every sync command to *f*.

    Options added: --api-key, --env-file, --base-dir and the hidden
    --base-url override.
    """
    shared_options = [
        click.option(
            "--base-url",
            default=None,
            hidden=True,
        ),
        click.option(
            "--base-dir",
            help="Base directory for pulled files",
            default="humanloop",
            type=click.Path(),
        ),
        click.option(
            "--env-file",
            help="Path to .env file. If not provided, looks for .env in current directory.",
            default=None,
            type=click.Path(exists=True),
            show_default=False,
        ),
        click.option(
            "--api-key",
            help="Humanloop API key. If not provided, uses HUMANLOOP_API_KEY from .env or environment.",
            default=None,
            show_default=False,
        ),
    ]

    @wraps(f)
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)

    # Apply innermost-first, mirroring the original bottom-up decorator stack
    # (--api-key ends up outermost, exactly as before).
    decorated = wrapper
    for add_option in shared_options:
        decorated = add_option(decorated)
    return decorated
+
def handle_sync_errors(f: Callable) -> Callable:
    """Wrap a CLI command so any exception prints a red error and exits with code 1.

    :param f: The click command callback to protect.
    :return: The wrapped callback (metadata preserved via functools.wraps).
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception as e:
            # An f-string is already a str; the previous str(...) wrapper was redundant.
            click.echo(click.style(f"Error: {e}", fg=ERROR_COLOR))
            sys.exit(1)
    return wrapper
+
@click.group(
    help="Humanloop CLI for managing sync operations.",
    context_settings={
        # Accept both -h and --help, and widen help output so the long
        # option help strings below wrap less aggressively.
        "help_option_names": ["-h", "--help"],
        "max_content_width": 100,
    }
)
def cli():
    """Humanloop CLI for managing sync operations."""
    pass
+
@cli.command()
@click.option(
    "--path",
    "-p",
    help="Path to pull (file or directory). If not provided, pulls everything. "
    "To pull a specific file, ensure the extension for the file is included (e.g. .prompt or .agent). "
    "To pull a directory, simply specify the path to the directory (e.g. abc/def to pull all files under abc/def and its subdirectories).",
    default=None,
)
@click.option(
    "--environment",
    "-e",
    help="Environment to pull from (e.g. 'production', 'staging')",
    default=None,
)
@click.option(
    "--verbose",
    "-v",
    is_flag=True,
    help="Show detailed information about the operation",
)
@handle_sync_errors
@common_options
def pull(
    path: Optional[str],
    environment: Optional[str],
    api_key: Optional[str],
    env_file: Optional[str],
    base_dir: str,
    base_url: Optional[str],
    verbose: bool
):
    """Pull prompt and agent files from Humanloop to your local filesystem.

    \b
    This command will:
    1. Fetch prompt and agent files from your Humanloop workspace
    2. Save them to your local filesystem
    3. Maintain the same directory structure as in Humanloop
    4. Add appropriate file extensions (.prompt or .agent)

    \b
    The files will be saved with the following structure:
    {base_dir}/
    ├── prompts/
    │   ├── my_prompt.prompt
    │   └── nested/
    │       └── another_prompt.prompt
    └── agents/
        └── my_agent.agent

    The operation will overwrite existing files with the latest version from Humanloop
    but will not delete local files that don't exist in the remote workspace.

    Currently only supports syncing prompt and agent files. Other file types will be skipped."""
    client = get_client(api_key, env_file, base_url)
    # Verbose mode turns on the sync client's own debug logging instead of the spinner.
    sync_client = SyncClient(client, base_dir=base_dir, log_level=logging.DEBUG if verbose else logging.WARNING)

    click.echo(click.style("Pulling files from Humanloop...", fg=INFO_COLOR))
    click.echo(click.style(f"Path: {path or '(root)'}", fg=INFO_COLOR))
    click.echo(click.style(f"Environment: {environment or '(default)'}", fg=INFO_COLOR))

    if verbose:
        # Don't use the spinner in verbose mode as the spinner and sync client logging compete
        successful_files = sync_client.pull(path, environment)
    else:
        with progress_context("Pulling files..."):
            successful_files = sync_client.pull(path, environment)

    # Get metadata about the operation
    # NOTE(review): the summary below is driven entirely by the recorded
    # metadata; the `successful_files` return value above is not used here.
    metadata = sync_client.metadata.get_last_operation()
    if metadata:
        # Determine if the operation was successful based on failed_files
        is_successful = not metadata.get('failed_files') and not metadata.get('error')
        duration_color = SUCCESS_COLOR if is_successful else ERROR_COLOR
        click.echo(click.style(f"Pull completed in {metadata['duration_ms']}ms", fg=duration_color))

        if metadata['successful_files']:
            click.echo(click.style(f"\nSuccessfully pulled {len(metadata['successful_files'])} files:", fg=SUCCESS_COLOR))

            if verbose:
                # Verbose: list every pulled file.
                for file in metadata['successful_files']:
                    click.echo(click.style(f"  ✓ {file}", fg=SUCCESS_COLOR))
            else:
                # Non-verbose: cap the listing at MAX_FILES_TO_DISPLAY entries.
                files_to_display = metadata['successful_files'][:MAX_FILES_TO_DISPLAY]
                for file in files_to_display:
                    click.echo(click.style(f"  ✓ {file}", fg=SUCCESS_COLOR))

                if len(metadata['successful_files']) > MAX_FILES_TO_DISPLAY:
                    remaining = len(metadata['successful_files']) - MAX_FILES_TO_DISPLAY
                    click.echo(click.style(f"  ...and {remaining} more", fg=SUCCESS_COLOR))
        if metadata['failed_files']:
            click.echo(click.style(f"\nFailed to pull {len(metadata['failed_files'])} files:", fg=ERROR_COLOR))
            for file in metadata['failed_files']:
                click.echo(click.style(f"  ✗ {file}", fg=ERROR_COLOR))
        if metadata.get('error'):
            click.echo(click.style(f"\nError: {metadata['error']}", fg=ERROR_COLOR))
+
def format_timestamp(timestamp: str) -> str:
    """Render an ISO-8601 timestamp as ``YYYY-MM-DD HH:MM:SS``.

    Returns the input unchanged when it cannot be parsed (or is not a string).
    """
    try:
        # fromisoformat does not accept a trailing 'Z'; translate it to an offset.
        parsed = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
    except (ValueError, AttributeError):
        return timestamp
    return parsed.strftime('%Y-%m-%d %H:%M:%S')
+
@cli.command()
@click.option(
    "--oneline",
    is_flag=True,
    help="Display history in a single line per operation",
)
@handle_sync_errors
@common_options
def history(api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url: Optional[str], oneline: bool):
    """Show sync operation history.

    Reads the recorded operations from the sync metadata file and prints
    either a compact one-line-per-operation view (--oneline) or a detailed
    multi-line report for each operation.
    """
    client = get_client(api_key, env_file, base_url)
    sync_client = SyncClient(client, base_dir=base_dir)

    history = sync_client.metadata.get_history()
    if not history:
        click.echo(click.style("No sync operations found in history.", fg=WARNING_COLOR))
        return

    if not oneline:
        click.echo(click.style("Sync Operation History:", fg=INFO_COLOR))
        click.echo(click.style("======================", fg=INFO_COLOR))

    for op in history:
        if oneline:
            # Format: timestamp | operation_type | path | environment | duration_ms | status
            status = click.style("✓", fg=SUCCESS_COLOR) if not op['failed_files'] else click.style("✗", fg=ERROR_COLOR)
            click.echo(f"{format_timestamp(op['timestamp'])} | {op['operation_type']} | {op['path'] or '(root)'} | {op['environment'] or '-'} | {op['duration_ms']}ms | {status}")
        else:
            click.echo(click.style(f"\nOperation: {op['operation_type']}", fg=INFO_COLOR))
            click.echo(f"Timestamp: {format_timestamp(op['timestamp'])}")
            click.echo(f"Path: {op['path'] or '(root)'}")
            if op['environment']:
                click.echo(f"Environment: {op['environment']}")
            click.echo(f"Duration: {op['duration_ms']}ms")
            if op['successful_files']:
                ok_count = len(op['successful_files'])
                # e.g. "Successfully pulled 3 files"
                click.echo(click.style(f"Successfully {op['operation_type']}ed {ok_count} file{'' if ok_count == 1 else 's'}", fg=SUCCESS_COLOR))
            if op['failed_files']:
                fail_count = len(op['failed_files'])
                # Fixed grammar: previously rendered as "Failed to pulled N files".
                click.echo(click.style(f"Failed to {op['operation_type']} {fail_count} file{'' if fail_count == 1 else 's'}", fg=ERROR_COLOR))
            # 'error' is NotRequired in the metadata schema; use .get for safety
            # (and for consistency with the pull command's summary code).
            if op.get('error'):
                click.echo(click.style(f"Error: {op['error']}", fg=ERROR_COLOR))
            click.echo(click.style("----------------------", fg=INFO_COLOR))
+
+if __name__ == "__main__":
+ cli()
\ No newline at end of file
diff --git a/src/humanloop/cli/progress.py b/src/humanloop/cli/progress.py
new file mode 100644
index 00000000..67ef4506
--- /dev/null
+++ b/src/humanloop/cli/progress.py
@@ -0,0 +1,120 @@
+import sys
+import time
+from typing import Optional, Callable, Any
+from threading import Thread, Event
+from contextlib import contextmanager
+
class Spinner:
    """A lightweight terminal spinner animated on a background daemon thread."""

    def __init__(
        self,
        message: str = "Loading...",
        delay: float = 0.1,
        spinner_chars: str = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
    ):
        """
        Args:
            message: Text displayed next to the spinner frame.
            delay: Seconds between animation frames.
            spinner_chars: Frames cycled through, one character at a time.
        """
        self.message = message
        self.delay = delay
        self.spinner_chars = spinner_chars
        self.stop_event = Event()
        self.spinner_thread: Optional[Thread] = None

    def _spin(self):
        """Animation loop: redraw the current frame until the stop event fires."""
        frame = 0
        frame_count = len(self.spinner_chars)
        while not self.stop_event.is_set():
            sys.stdout.write(f"\r{self.spinner_chars[frame]} {self.message}")
            sys.stdout.flush()
            frame = (frame + 1) % frame_count
            time.sleep(self.delay)

    def start(self):
        """Begin animating on a daemon thread (won't block interpreter exit)."""
        self.stop_event.clear()
        worker = Thread(target=self._spin)
        worker.daemon = True
        self.spinner_thread = worker
        worker.start()

    def stop(self, final_message: Optional[str] = None):
        """Stop the animation, erase the spinner line, optionally print a message."""
        if self.spinner_thread is None:
            # Never started; nothing to clean up.
            return

        self.stop_event.set()
        self.spinner_thread.join()

        # Overwrite the spinner line with spaces, then return the cursor.
        blank = " " * (len(self.message) + 2)
        sys.stdout.write(f"\r{blank}\r")

        if final_message:
            print(final_message)
        sys.stdout.flush()

    def update_message(self, message: str):
        """Swap the text shown next to the spinner (picked up on the next frame)."""
        self.message = message
+
class ProgressTracker:
    """A textual progress bar with percentage and a rough ETA.

    Writes in-place updates to stdout via carriage returns; call
    :meth:`finish` to terminate the progress line.
    """

    def __init__(
        self,
        total: int,
        message: str = "Progress",
        width: int = 40
    ):
        """
        Args:
            total: Expected number of work units (may be 0 for an empty job).
            message: Label shown before the bar.
            width: Bar width in characters.
        """
        self.total = total
        self.current = 0
        self.message = message
        self.width = width
        self.start_time = time.time()

    def update(self, increment: int = 1):
        """Advance progress by *increment* units and redraw the bar."""
        self.current += increment
        self._display()

    def _display(self):
        """Render the current state of the bar to stdout."""
        # Guard against ZeroDivisionError for empty jobs: treat 0/0 as complete.
        fraction = (self.current / self.total) if self.total > 0 else 1.0
        percentage = fraction * 100
        filled = int(self.width * fraction)
        bar = "█" * filled + "░" * (self.width - filled)

        elapsed = time.time() - self.start_time
        if self.current > 0:
            # Naive ETA: assume a constant per-unit rate so far.
            rate = elapsed / self.current
            eta = rate * (self.total - self.current)
            time_str = f"ETA: {eta:.1f}s"
        else:
            time_str = "Calculating..."

        sys.stdout.write(f"\r{self.message}: [{bar}] {percentage:.1f}% {time_str}")
        sys.stdout.flush()

    def finish(self, final_message: Optional[str] = None):
        """Draw the final state, end the line, and optionally print a message."""
        self._display()
        print()  # move off the in-place progress line
        if final_message:
            print(final_message)
+
@contextmanager
def progress_context(message: str = "Loading...", success_message: str | None = None, error_message: str | None = None):
    """Run the wrapped block with a Spinner showing *message*.

    Yields the spinner so the caller can update its message. On a clean exit
    the spinner stops with *success_message*; on an exception it stops with
    *error_message* and the exception propagates unchanged.
    """
    spinner = Spinner(message)
    spinner.start()
    try:
        yield spinner
    except Exception:
        # Stop (and clear) the spinner line before letting the error surface.
        spinner.stop(error_message)
        raise
    else:
        spinner.stop(success_message)
+
def with_progress(message: str = "Loading..."):
    """Decorator that runs the wrapped function under a progress spinner.

    Args:
        message: Text shown next to the spinner while the function runs.

    Returns:
        A decorator preserving the wrapped function's metadata.
    """
    from functools import wraps  # local import: keeps module-level deps unchanged

    def decorator(func: Callable):
        @wraps(func)  # fix: previously the wrapper lost func's name/docstring
        def wrapper(*args, **kwargs):
            with progress_context(message):
                return func(*args, **kwargs)
        return wrapper
    return decorator
\ No newline at end of file
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 74cd6c97..996b75ad 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -18,7 +18,7 @@
)
from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop
-from humanloop.overload import overload_call, overload_log
+from humanloop.overload import overload_call, overload_log, overload_with_local_files
from humanloop.decorators.flow import flow as flow_decorator_factory
from humanloop.decorators.prompt import prompt_decorator_factory
from humanloop.decorators.tool import tool_decorator_factory as tool_decorator_factory
@@ -29,6 +29,7 @@
from humanloop.otel.processor import HumanloopSpanProcessor
from humanloop.prompt_utils import populate_template
from humanloop.prompts.client import PromptsClient
+from humanloop.sync.sync_client import SyncClient, DEFAULT_CACHE_SIZE
class ExtendedEvalsClient(EvaluationsClient):
@@ -87,8 +88,9 @@ class Humanloop(BaseHumanloop):
"""
See docstring of :class:`BaseHumanloop`.
- This class extends the base client with custom evaluation utilities
- and decorators for declaring Files in code.
+ This class extends the base client with custom evaluation utilities,
+ decorators for declaring Files in code, and utilities for syncing
+ files between Humanloop and local filesystem.
"""
def __init__(
@@ -102,6 +104,9 @@ def __init__(
httpx_client: typing.Optional[httpx.Client] = None,
opentelemetry_tracer_provider: Optional[TracerProvider] = None,
opentelemetry_tracer: Optional[Tracer] = None,
+ use_local_files: bool = False,
+ files_directory: str = "humanloop",
+ cache_size: int = DEFAULT_CACHE_SIZE,
):
"""
Extends the base client with custom evaluation utilities and
@@ -111,6 +116,21 @@ def __init__(
You can provide a TracerProvider and a Tracer to integrate
with your existing telemetry system. If not provided,
an internal TracerProvider will be used.
+
+ Parameters
+ ----------
+ base_url: Optional base URL for the API
+ environment: The environment to use (default: DEFAULT)
+ api_key: Your Humanloop API key (default: from HUMANLOOP_API_KEY env var)
+ timeout: Optional timeout for API requests
+ follow_redirects: Whether to follow redirects
+ httpx_client: Optional custom httpx client
+ opentelemetry_tracer_provider: Optional tracer provider for telemetry
+ opentelemetry_tracer: Optional tracer for telemetry
+ use_local_files: Whether to use local files for prompts and agents
+ files_directory: Directory for local files (default: "humanloop")
+ cache_size: Maximum number of files to cache when use_local_files is True (default: DEFAULT_CACHE_SIZE).
+ This parameter has no effect if use_local_files is False.
"""
super().__init__(
base_url=base_url,
@@ -121,6 +141,12 @@ def __init__(
httpx_client=httpx_client,
)
+ self.use_local_files = use_local_files
+ self._sync_client = SyncClient(
+ client=self,
+ base_dir=files_directory,
+ cache_size=cache_size
+ )
eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
eval_client.client = self
self.evaluations = eval_client
@@ -130,6 +156,16 @@ def __init__(
# and the @flow decorator providing the trace_id
self.prompts = overload_log(client=self.prompts)
self.prompts = overload_call(client=self.prompts)
+ self.prompts = overload_with_local_files(
+ client=self.prompts,
+ sync_client=self._sync_client,
+ use_local_files=self.use_local_files
+ )
+ self.agents = overload_with_local_files(
+ client=self.agents,
+ sync_client=self._sync_client,
+ use_local_files=self.use_local_files
+ )
self.flows = overload_log(client=self.flows)
self.tools = overload_log(client=self.tools)
@@ -351,7 +387,50 @@ def agent():
attributes=attributes,
)
    def pull(self,
        environment: str | None = None,
        path: str | None = None
    ) -> List[str]:
        """Pull Prompt and Agent files from Humanloop to local filesystem.

        This method will:
        1. Fetch Prompt and Agent files from your Humanloop workspace
        2. Save them to the local filesystem using the client's files_directory (set during initialization)
        3. Maintain the same directory structure as in Humanloop
        4. Add appropriate file extensions (.prompt or .agent)

        The path parameter can be used in two ways:
        - If it points to a specific file (e.g. "path/to/file.prompt" or "path/to/file.agent"), only that file will be pulled
        - If it points to a directory (e.g. "path/to/directory"), all Prompt and Agent files in that directory will be pulled
        - If no path is provided, all Prompt and Agent files will be pulled

        The operation will overwrite existing files with the latest version from Humanloop
        but will not delete local files that don't exist in the remote workspace.

        Currently only supports syncing prompt and agent files. Other file types will be skipped.

        The files will be saved with the following structure:
        ```
        {files_directory}/
        ├── prompts/
        │   ├── my_prompt.prompt
        │   └── nested/
        │       └── another_prompt.prompt
        └── agents/
            └── my_agent.agent
        ```

        :param environment: The environment to pull the files from.
        :param path: Optional path to either a specific file (e.g. "path/to/file.prompt") or a directory (e.g. "path/to/directory").
            If not provided, all Prompt and Agent files will be pulled.
        :return: List of successfully processed file paths.
        """
        # Thin delegation to the SyncClient constructed in __init__.
        # NOTE(review): keyword order here is (environment, path) while the CLI
        # calls sync_client.pull(path, environment) positionally — confirm
        # SyncClient.pull's parameter order accommodates both call sites.
        return self._sync_client.pull(
            environment=environment,
            path=path
        )
+
class AsyncHumanloop(AsyncBaseHumanloop):
"""
See docstring of AsyncBaseHumanloop.
@@ -359,4 +438,4 @@ class AsyncHumanloop(AsyncBaseHumanloop):
TODO: Add custom evaluation utilities for async case.
"""
- pass
+ pass
\ No newline at end of file
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index b0c83215..bd409236 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -1,19 +1,23 @@
import inspect
import logging
import types
+import warnings
from typing import TypeVar, Union
-
+from pathlib import Path
from humanloop.context import (
get_decorator_context,
get_evaluation_context,
get_trace_id,
)
-from humanloop.evals.run import HumanloopRuntimeError
+from humanloop.error import HumanloopRuntimeError
from humanloop.evaluators.client import EvaluatorsClient
from humanloop.flows.client import FlowsClient
from humanloop.prompts.client import PromptsClient
+from humanloop.agents.client import AgentsClient
from humanloop.tools.client import ToolsClient
+from humanloop.sync.sync_client import SyncClient
+from humanloop.types import FileType
from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse
from humanloop.types.create_flow_log_response import CreateFlowLogResponse
from humanloop.types.create_prompt_log_response import CreatePromptLogResponse
@@ -74,7 +78,7 @@ def _overload_log(
try:
response = self._log(**kwargs_eval)
except Exception as e:
- # Re-raising as HumanloopDecoratorError so the decorators don't catch it
+ # Re-raising as HumanloopRuntimeError so the decorators don't catch it
raise HumanloopRuntimeError from e
if eval_callback is not None:
eval_callback(response.id)
@@ -82,7 +86,7 @@ def _overload_log(
try:
response = self._log(**kwargs)
except Exception as e:
- # Re-raising as HumanloopDecoratorError so the decorators don't catch it
+ # Re-raising as HumanloopRuntimeError so the decorators don't catch it
raise HumanloopRuntimeError from e
return response
@@ -114,7 +118,7 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
try:
response = self._call(**kwargs)
except Exception as e:
- # Re-raising as HumanloopDecoratorError so the decorators don't catch it
+ # Re-raising as HumanloopRuntimeError so the decorators don't catch it
raise HumanloopRuntimeError from e
return response
@@ -122,3 +126,97 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
# Replace the original log method with the overloaded one
client.call = types.MethodType(_overload_call, client) # type: ignore [assignment]
return client
+
def _get_file_type_from_client(client: Union[PromptsClient, AgentsClient]) -> FileType:
    """Map a client instance to the Humanloop file type it manages."""
    if isinstance(client, PromptsClient):
        return "prompt"
    if isinstance(client, AgentsClient):
        return "agent"
    raise ValueError(f"Unsupported client type: {type(client)}")
+
def overload_with_local_files(
    client: Union[PromptsClient, AgentsClient],
    sync_client: SyncClient,
    use_local_files: bool,
) -> Union[PromptsClient, AgentsClient]:
    """Overload call and log methods to handle local files when use_local_files is True.

    When use_local_files is True, the following prioritization strategy is used:
    1. Direct Parameters: If {file_type} parameters are provided directly (as a PromptKernelRequestParams or AgentKernelRequestParams object),
       these take precedence and the local file is ignored.
    2. Version/Environment: If version_id or environment is specified, the remote version is used instead
       of the local file.
    3. Local File: If neither of the above are specified, attempts to use the local file at the given path.

    For example, with a prompt client:
    - If prompt={model: "gpt-4", ...} is provided, uses those parameters directly
    - If version_id="123" is provided, uses that remote version
    - Otherwise, tries to load from the local file at the given path

    Args:
        client: The client to overload (PromptsClient or AgentsClient)
        sync_client: The sync client used for file operations
        use_local_files: Whether to enable local file handling

    Returns:
        The client with overloaded methods

    Raises:
        HumanloopRuntimeError: If use_local_files is True and local file cannot be accessed
    """
    # Prefer the private `_call`/`_log` when a previous overload
    # (overload_call/overload_log) has already wrapped this client, so we wrap
    # the original implementation rather than an earlier wrapper.
    original_call = client._call if hasattr(client, '_call') else client.call
    original_log = client._log if hasattr(client, '_log') else client.log
    file_type = _get_file_type_from_client(client)

    # NOTE(review): annotated as PromptCallResponse, but for function_name ==
    # "log" this returns the client's log-response type — confirm annotation.
    def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
        if "id" in kwargs and "path" in kwargs:
            raise HumanloopRuntimeError(f"Can only specify one of `id` or `path` when {function_name}ing a {file_type}")
        # Handle local files if enabled
        if use_local_files and "path" in kwargs:
            # Check if version_id or environment is specified
            has_version_info = "version_id" in kwargs or "environment" in kwargs
            normalized_path = sync_client._normalize_path(kwargs["path"])

            if has_version_info:
                logger.warning(
                    f"Ignoring local file for `{normalized_path}` as version_id or environment was specified. "
                    "Using remote version instead."
                )
            else:
                # Only use local file if no version info is specified
                try:
                    # If file_type is already specified in kwargs, it means user provided a PromptKernelRequestParams object
                    if file_type in kwargs and not isinstance(kwargs[file_type], str):
                        logger.warning(
                            f"Ignoring local file for `{normalized_path}` as {file_type} parameters were directly provided. "
                            "Using provided parameters instead."
                        )
                    else:
                        # Inject the local file's content as the kernel parameter
                        # before delegating to the original method.
                        file_content = sync_client.get_file_content(normalized_path, file_type)
                        kwargs[file_type] = file_content
                except (HumanloopRuntimeError) as e:
                    # Re-raise with more context
                    raise HumanloopRuntimeError(f"Failed to use local file for `{normalized_path}`: {str(e)}")

        try:
            if function_name == "call":
                return original_call(**kwargs)
            elif function_name == "log":
                return original_log(**kwargs)
            else:
                raise ValueError(f"Unsupported function name: {function_name}")
        except Exception as e:
            # Re-raising as HumanloopRuntimeError so the decorators don't catch it
            raise HumanloopRuntimeError from e

    def _overload_call(self, **kwargs) -> PromptCallResponse:
        return _overload(self, "call", **kwargs)

    def _overload_log(self, **kwargs) -> PromptCallResponse:
        return _overload(self, "log", **kwargs)

    # Bind the wrappers as bound methods, replacing the public API in place.
    client.call = types.MethodType(_overload_call, client)
    client.log = types.MethodType(_overload_log, client)
    return client
\ No newline at end of file
diff --git a/src/humanloop/sync/__init__.py b/src/humanloop/sync/__init__.py
new file mode 100644
index 00000000..007659df
--- /dev/null
+++ b/src/humanloop/sync/__init__.py
@@ -0,0 +1,3 @@
+from humanloop.sync.sync_client import SyncClient
+
+__all__ = ["SyncClient"]
diff --git a/src/humanloop/sync/metadata_handler.py b/src/humanloop/sync/metadata_handler.py
new file mode 100644
index 00000000..18de2e8a
--- /dev/null
+++ b/src/humanloop/sync/metadata_handler.py
@@ -0,0 +1,125 @@
+import json
+import time
+from datetime import datetime
+from pathlib import Path
+from typing import Dict, List, Optional, TypedDict, NotRequired
+import logging
+
+logger = logging.getLogger(__name__)
+
class OperationData(TypedDict):
    """Type definition for operation data structure."""
    # ISO-8601 timestamp of when the operation ran (datetime.isoformat()).
    timestamp: str
    # Operation kind, e.g. "pull" or "push".
    operation_type: str
    # The path that was synced.
    path: str
    # Environment name, when one was specified.
    environment: NotRequired[Optional[str]]
    # Files processed successfully during the operation.
    successful_files: List[str]
    # Files that failed to process.
    failed_files: List[str]
    # Error message if the operation as a whole failed.
    error: NotRequired[Optional[str]]
    # Wall-clock duration of the operation in milliseconds.
    duration_ms: float
+
class Metadata(TypedDict):
    """Type definition for the metadata structure."""
    # Most recent operation, or None before anything has been logged.
    last_operation: Optional[OperationData]
    # Recent operations, newest first, bounded by MetadataHandler.max_history.
    history: List[OperationData]
+
class MetadataHandler:
    """Persists sync-operation metadata to a JSON file.

    Stores the most recent operation plus a bounded history (default 5
    entries) in ``.sync_metadata.json`` under the sync base directory.
    """

    def __init__(self, base_dir: Path, max_history: int = 5) -> None:
        """Initialize the metadata handler.

        Args:
            base_dir: Base directory where metadata will be stored
            max_history: Maximum number of operations to keep in history
        """
        self.base_dir = base_dir
        self.metadata_file = base_dir / ".sync_metadata.json"
        self.max_history = max_history
        self._ensure_metadata_file()

    def _ensure_metadata_file(self) -> None:
        """Create an empty metadata file if one does not exist yet."""
        if self.metadata_file.exists():
            return
        empty: Metadata = {"last_operation": None, "history": []}
        self._write_metadata(empty)

    def _read_metadata(self) -> Metadata:
        """Load metadata from disk, falling back to an empty structure on any error."""
        try:
            with open(self.metadata_file, 'r') as fh:
                return json.load(fh)
        except Exception as exc:
            # Corrupt or unreadable metadata must not break sync operations.
            logger.error(f"Error reading metadata file: {exc}")
            return {"last_operation": None, "history": []}

    def _write_metadata(self, data: Metadata) -> None:
        """Persist metadata to disk; failures are logged rather than raised."""
        try:
            self.metadata_file.parent.mkdir(parents=True, exist_ok=True)
            with open(self.metadata_file, 'w') as fh:
                json.dump(data, fh, indent=2)
        except Exception as exc:
            logger.error(f"Error writing metadata file: {exc}")

    def log_operation(
        self,
        operation_type: str,
        path: str,
        duration_ms: float,
        environment: Optional[str] = None,
        successful_files: Optional[List[str]] = None,
        failed_files: Optional[List[str]] = None,
        error: Optional[str] = None,
    ) -> None:
        """Record one sync operation as both the last operation and a history entry.

        Args:
            operation_type: Type of operation (e.g., "pull", "push")
            path: The path that was synced
            duration_ms: Duration of the operation in milliseconds
            environment: Optional environment name
            successful_files: List of successfully processed files
            failed_files: List of files that failed to process
            error: Any error message if the operation failed
        """
        record: OperationData = {
            "timestamp": datetime.now().isoformat(),
            "operation_type": operation_type,
            "path": path,
            "environment": environment,
            "successful_files": successful_files or [],
            "failed_files": failed_files or [],
            "error": error,
            "duration_ms": duration_ms,
        }

        metadata = self._read_metadata()
        metadata["last_operation"] = record
        # Newest first; trim to the configured history length.
        metadata["history"] = [record, *metadata["history"]][:self.max_history]
        self._write_metadata(metadata)

    def get_last_operation(self) -> Optional[OperationData]:
        """Return the most recent operation, or None if nothing has been logged."""
        return self._read_metadata().get("last_operation")

    def get_history(self) -> List[OperationData]:
        """Return the recorded operations, newest first."""
        return self._read_metadata().get("history", [])
\ No newline at end of file
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
new file mode 100644
index 00000000..6b16cde0
--- /dev/null
+++ b/src/humanloop/sync/sync_client.py
@@ -0,0 +1,327 @@
+import logging
+from pathlib import Path
+from typing import List, TYPE_CHECKING, Optional
+from functools import lru_cache
+from humanloop.types import FileType
+from .metadata_handler import MetadataHandler
+import time
+from humanloop.error import HumanloopRuntimeError
+import json
+
+if TYPE_CHECKING:
+ from humanloop.base_client import BaseHumanloop
+
# Module logger: INFO by default, emitting plain messages to the console.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setFormatter(logging.Formatter("%(message)s"))
# Attach our handler only if nothing is configured yet, to avoid duplicates.
if not logger.hasHandlers():
    logger.addHandler(console_handler)

# Default cache size for file content caching
DEFAULT_CACHE_SIZE = 100
+
def format_api_error(error: Exception) -> str:
    """Format API error messages to be more user-friendly.

    Extracts ``detail.description`` (falling back to ``detail.msg``) from error
    strings shaped like ``"... status_code: ..., body: {...}"``. Anything that
    does not match that shape is returned unchanged.

    Args:
        error: The exception raised by the API client.

    Returns:
        A human-readable message, or ``str(error)`` if parsing fails.
    """
    error_msg = str(error)
    if "status_code" not in error_msg or "body" not in error_msg:
        return error_msg

    try:
        import ast  # local import: only needed on this error-formatting path

        # Extract the body part after "body: "
        body_str = error_msg.split("body: ")[1]
        # The body may be JSON or the repr() of a Python dict. Try JSON first,
        # then fall back to ast.literal_eval, which safely parses dict reprs
        # even when values contain quote characters. (Naively swapping single
        # quotes for double quotes breaks on apostrophes inside messages.)
        try:
            body = json.loads(body_str)
        except ValueError:
            body = ast.literal_eval(body_str)

        # Get the detail from the body
        detail = body.get("detail", {})

        # Prefer description, fall back to msg
        return detail.get("description") or detail.get("msg") or error_msg
    except Exception as e:
        logger.debug(f"Failed to parse error message: {str(e)}")
        return error_msg
+
class SyncClient:
    """Client for managing synchronization between local filesystem and Humanloop.

    This client provides file synchronization between Humanloop and the local filesystem,
    with built-in caching for improved performance. The cache uses Python's LRU (Least
    Recently Used) cache to automatically manage memory usage by removing least recently
    accessed files when the cache is full.

    The cache is automatically updated when files are pulled or saved, and can be
    manually cleared using the clear_cache() method.
    """

    def __init__(
        self,
        client: "BaseHumanloop",
        base_dir: str = "humanloop",
        cache_size: int = DEFAULT_CACHE_SIZE,
        log_level: int = logging.WARNING
    ):
        """
        Parameters
        ----------
        client: Humanloop client instance
        base_dir: Base directory for synced files (default: "humanloop")
        cache_size: Maximum number of files to cache (default: DEFAULT_CACHE_SIZE)
        log_level: Log level for logging (default: WARNING)
        """
        self.client = client
        self.base_dir = Path(base_dir)
        self._cache_size = cache_size

        # Only mutates the module-level logger; no `global` statement needed
        # because the name is never rebound.
        logger.setLevel(log_level)

        # Public cached reader: a per-instance lru_cache wrapper around
        # _get_file_content_impl, sized by `cache_size`. It is assigned on the
        # instance only — a class-level method with the same name would be
        # permanently shadowed by this attribute (and would bypass the cache).
        self.get_file_content = lru_cache(maxsize=cache_size)(self._get_file_content_impl)
        # Initialize metadata handler
        self.metadata = MetadataHandler(self.base_dir)

    def _get_file_content_impl(self, path: str, file_type: FileType) -> str:
        """Read a serialized file from the local filesystem (uncached).

        This is the implementation wrapped by the per-instance lru_cache in
        __init__; use ``self.get_file_content(...)`` to benefit from caching.

        Args:
            path: The normalized path to the file (without extension)
            file_type: The type of file (Prompt or Agent)

        Returns:
            The raw file content

        Raises:
            HumanloopRuntimeError: If the file doesn't exist or can't be read
        """
        # Construct path to local file and add the type-specific extension
        local_path = self.base_dir / path
        local_path = local_path.parent / f"{local_path.stem}.{file_type}"

        if not local_path.exists():
            raise HumanloopRuntimeError(f"Local file not found: {local_path}")

        try:
            # Read the raw file content
            with open(local_path) as f:
                file_content = f.read()
            logger.debug(f"Using local file content from {local_path}")
            return file_content
        except Exception as e:
            raise HumanloopRuntimeError(f"Error reading local file {local_path}: {str(e)}")

    def clear_cache(self) -> None:
        """Clear the LRU cache of file contents."""
        self.get_file_content.cache_clear()

    def _normalize_path(self, path: str) -> str:
        """Normalize the path by:
        1. Removing a trailing .prompt / .agent extension — only those two; a
           generic "strip after the last dot" would corrupt paths whose
           directory names contain dots (e.g. "v1.0/foo" -> "v1")
        2. Converting backslashes to forward slashes
        3. Removing leading and trailing slashes
        4. Removing leading and trailing whitespace
        5. Normalizing multiple consecutive slashes into a single forward slash

        Args:
            path: The path to normalize

        Returns:
            The normalized path
        """
        path = path.strip()

        # Remove only the known serialized-file extensions
        for extension in (".prompt", ".agent"):
            if path.endswith(extension):
                path = path[: -len(extension)]
                break

        # Convert backslashes to forward slashes
        path = path.replace("\\", "/")

        # Normalize multiple consecutive slashes into a single forward slash
        while "//" in path:
            path = path.replace("//", "/")

        # Remove leading/trailing whitespace and slashes
        return path.strip().strip("/")

    def is_file(self, path: str) -> bool:
        """Check if the path is a file by checking for .prompt or .agent extension."""
        return path.endswith((".prompt", ".agent"))

    def _save_serialized_file(self, serialized_content: str, file_path: str, file_type: FileType) -> None:
        """Save serialized file to local filesystem.

        Args:
            serialized_content: Raw serialized content to write
            file_path: Path relative to base_dir, without extension
            file_type: File type; also used as the file extension
        """
        try:
            # Create full path including base_dir prefix
            full_path = self.base_dir / file_path
            # Create directory if it doesn't exist
            full_path.parent.mkdir(parents=True, exist_ok=True)

            # Add file type extension
            new_path = full_path.parent / f"{full_path.stem}.{file_type}"

            # Write raw file content to file
            with open(new_path, "w") as f:
                f.write(serialized_content)

            # Invalidate cached contents so the next read sees the new file.
            # NOTE: lru_cache has no per-key invalidation, so this clears the
            # whole cache.
            self.clear_cache()
        except Exception as e:
            logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}")
            raise

    def _pull_file(self, path: str, environment: str | None = None) -> None:
        """Pull a specific file from Humanloop to local filesystem.

        Raises:
            ValueError: If the remote file is not a Prompt or Agent
            HumanloopRuntimeError: If the response carries no raw file content
        """
        file = self.client.files.retrieve_by_path(
            path=path,
            environment=environment,
            include_raw_file_content=True
        )

        if file.type not in ["prompt", "agent"]:
            raise ValueError(f"Unsupported file type: {file.type}")

        # Guard against a missing payload; writing None would otherwise raise
        # a confusing TypeError deep inside the file write.
        if not getattr(file, "raw_file_content", None):
            raise HumanloopRuntimeError(f"No raw file content returned for {path}")

        self._save_serialized_file(file.raw_file_content, file.path, file.type)

    def _pull_directory(
        self,
        path: str | None = None,
        environment: str | None = None,
    ) -> List[str]:
        """Sync Prompt and Agent files from Humanloop to local filesystem.

        Args:
            path: Directory path to pull from; None pulls from the root
            environment: Optional environment to pull from

        Returns:
            List of remote paths that were successfully written locally
        """
        successful_files = []
        failed_files = []
        page = 1

        logger.debug(f"Fetching files from directory: {path or '(root)'} in environment: {environment or '(default)'}")

        while True:
            try:
                logger.debug(f"Requesting page {page} of files")
                response = self.client.files.list_files(
                    type=["prompt", "agent"],
                    page=page,
                    include_raw_file_content=True,
                    environment=environment,
                    path=path
                )

                if len(response.records) == 0:
                    logger.debug("No more files found")
                    break

                logger.debug(f"Found {len(response.records)} files from page {page}")

                # Process each file
                for file in response.records:
                    # Skip if not a Prompt or Agent
                    if file.type not in ["prompt", "agent"]:
                        logger.warning(f"Skipping unsupported file type: {file.type}")
                        continue

                    # Skip if no raw file content
                    if not getattr(file, "raw_file_content", None):
                        logger.warning(f"No content found for {file.type} {getattr(file, 'id', '')}")
                        continue

                    try:
                        logger.debug(f"Saving {file.type} {file.path}")
                        self._save_serialized_file(file.raw_file_content, file.path, file.type)
                        successful_files.append(file.path)
                    except Exception as e:
                        failed_files.append(file.path)
                        logger.error(f"Task failed for {file.path}: {str(e)}")

                page += 1
            except Exception as e:
                formatted_error = format_api_error(e)
                raise HumanloopRuntimeError(f"Failed to pull files: {formatted_error}")

        if successful_files:
            logger.info(f"Successfully pulled {len(successful_files)} files")
        if failed_files:
            logger.warning(f"Failed to pull {len(failed_files)} files")

        return successful_files

    def pull(self, path: str | None = None, environment: str | None = None) -> List[str]:
        """Pull files from Humanloop to local filesystem.

        If the path ends with .prompt or .agent, pulls that specific file.
        Otherwise, pulls all files under the specified path.
        If no path is provided, pulls all files from the root.

        Args:
            path: The path to pull from (either a specific file or directory)
            environment: The environment to pull from

        Returns:
            List of successfully processed file paths (normalized, no extension)
        """
        start_time = time.time()
        normalized_path = self._normalize_path(path) if path else None

        logger.info(f"Starting pull operation: path={normalized_path or '(root)'}, environment={environment or '(default)'}")
        try:
            if path is None:
                # Pull all files from the root
                logger.debug("Pulling all files from root")
                successful_files = self._pull_directory(None, environment)
                failed_files = []  # Failed files are already logged in _pull_directory
            elif self.is_file(path.strip()):
                logger.debug(f"Pulling specific file: {normalized_path}")
                self._pull_file(normalized_path, environment)
                # Report the normalized (extension-less) path for consistency
                # with directory pulls and the recorded metadata.
                successful_files = [normalized_path]
                failed_files = []
            else:
                logger.debug(f"Pulling directory: {normalized_path}")
                successful_files = self._pull_directory(normalized_path, environment)
                failed_files = []  # Failed files are already logged in _pull_directory

            duration_ms = int((time.time() - start_time) * 1000)
            logger.info(f"Pull completed in {duration_ms}ms: {len(successful_files)} files succeeded")

            # Log the successful operation
            self.metadata.log_operation(
                operation_type="pull",
                path=normalized_path or "",  # Use empty string if path is None
                environment=environment,
                successful_files=successful_files,
                failed_files=failed_files,
                duration_ms=duration_ms
            )

            return successful_files
        except Exception as e:
            duration_ms = int((time.time() - start_time) * 1000)
            # Log the failed operation
            self.metadata.log_operation(
                operation_type="pull",
                path=normalized_path or "",  # Use empty string if path is None
                environment=environment,
                error=str(e),
                duration_ms=duration_ms
            )
            raise
diff --git a/tests/conftest.py b/tests/conftest.py
index 80e3b336..a8c78ac5 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -191,8 +191,14 @@ def api_keys() -> APIKeys:
@pytest.fixture(scope="session")
-def humanloop_client(api_keys: APIKeys) -> Humanloop:
- return Humanloop(api_key=api_keys.humanloop)
+def humanloop_client(request, api_keys: APIKeys) -> Humanloop:
+ """Create a Humanloop client for testing."""
+ use_local_files = getattr(request, "param", False)
+ return Humanloop(
+ api_key=api_keys.humanloop,
+ base_url="http://localhost:80/v5",
+ use_local_files=use_local_files
+ )
@pytest.fixture(scope="session", autouse=True)
@@ -214,8 +220,8 @@ def directory_cleanup(directory_id: str, humanloop_client: Humanloop):
client = humanloop_client.evaluators # type: ignore [assignment]
elif file.type == "flow":
client = humanloop_client.flows # type: ignore [assignment]
- else:
- raise NotImplementedError(f"Unknown HL file type {file.type}")
+ elif file.type == "agent":
+ client = humanloop_client.agents # type: ignore [assignment]
client.delete(file_id)
for subdirectory in response.subdirectories:
diff --git a/tests/sync/test_sync.py b/tests/sync/test_sync.py
new file mode 100644
index 00000000..a2441b4b
--- /dev/null
+++ b/tests/sync/test_sync.py
@@ -0,0 +1,267 @@
+from typing import List, NamedTuple, Union
+from pathlib import Path
+import pytest
+from humanloop import Humanloop, FileType, AgentResponse, PromptResponse
+from humanloop.error import HumanloopRuntimeError
+
+
class SyncableFile(NamedTuple):
    """Descriptor of a remote Humanloop file used by the sync tests."""
    path: str  # Humanloop path without extension; full remote path once created
    type: FileType  # "prompt" or "agent"
    model: str  # model name used when upserting the file
    id: str = ""  # Humanloop file id, filled in after creation
    version_id: str = ""  # Humanloop version id, filled in after creation
+
+
@pytest.fixture
def test_file_structure(humanloop_client: Humanloop, get_test_path) -> List[SyncableFile]:
    """Creates a predefined structure of files in Humanloop for testing sync"""
    specs: List[SyncableFile] = [
        SyncableFile(path="prompts/gpt-4", type="prompt", model="gpt-4"),
        SyncableFile(path="prompts/gpt-4o", type="prompt", model="gpt-4o"),
        SyncableFile(path="prompts/nested/complex/gpt-4o", type="prompt", model="gpt-4o"),
        SyncableFile(path="agents/gpt-4", type="agent", model="gpt-4"),
        SyncableFile(path="agents/gpt-4o", type="agent", model="gpt-4o"),
    ]

    # Upsert each spec in Humanloop and collect descriptors carrying the
    # server-assigned ids.
    created: List[SyncableFile] = []
    for spec in specs:
        remote_path = get_test_path(spec.path)
        response: Union[AgentResponse, PromptResponse]
        if spec.type == "prompt":
            response = humanloop_client.prompts.upsert(path=remote_path, model=spec.model)
        elif spec.type == "agent":
            response = humanloop_client.agents.upsert(path=remote_path, model=spec.model)
        created.append(
            SyncableFile(
                path=remote_path,
                type=spec.type,
                model=spec.model,
                id=response.id,
                version_id=response.version_id,
            )
        )

    return created
+
+
@pytest.fixture
def cleanup_local_files():
    """Remove the locally synced ``humanloop`` directory once the test is done."""
    yield
    import shutil

    # Tear down whatever the pull created locally.
    synced_dir = Path("humanloop")
    if synced_dir.exists():
        shutil.rmtree(synced_dir)
+
+
def test_pull_basic(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
    """Test that humanloop.sync() correctly syncs remote files to local filesystem"""
    # Run the sync
    successful_files = humanloop_client.pull()

    # Verify each file was synced correctly
    for file in test_file_structure:
        # Get the extension based on file type: .prompt, .agent
        extension = f".{file.type}"

        # The local path should mirror the remote path structure
        local_path = Path("humanloop") / f"{file.path}{extension}"

        # The pull result must report the file we created (previously the
        # return value was captured but never checked).
        assert file.path in successful_files, f"Expected {file.path} in pull result"

        # Basic assertions
        assert local_path.exists(), f"Expected synced file at {local_path}"
        assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"

        # Verify it's not empty
        content = local_path.read_text()
        assert content, f"File at {local_path} should not be empty"
+
@pytest.mark.parametrize("humanloop_client", [True], indirect=True)
def test_overload_with_local_files(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
    """Test that overload_with_local_files correctly handles local files.

    Flow:
    1. Create files in remote (via test_file_structure fixture)
    2. Pull files locally
    3. Test using the pulled files
    """
    # First pull the files locally
    humanloop_client.pull()

    # Exercise the first created file
    target = test_file_structure[0]
    local_path = Path("humanloop") / f"{target.path}.{target.type}"

    # Verify the file was pulled correctly
    assert local_path.exists(), f"Expected pulled file at {local_path}"
    assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"

    messages = [{"role": "user", "content": "Testing"}]

    # Call via the client matching the file type; the local copy should be used
    if target.type == "prompt":
        result = humanloop_client.prompts.call(path=target.path, messages=messages)
        assert result is not None
    elif target.type == "agent":
        result = humanloop_client.agents.call(path=target.path, messages=messages)
        assert result is not None

    # A path with no local file must raise
    with pytest.raises(HumanloopRuntimeError):
        if target.type == "prompt":
            humanloop_client.prompts.call(path="invalid/path")
        elif target.type == "agent":
            humanloop_client.agents.call(path="invalid/path")
+
@pytest.mark.parametrize("humanloop_client", [True], indirect=True)
def test_overload_log_with_local_files(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
    """Test that overload_with_local_files correctly handles local files for log operations.

    Flow:
    1. Create files in remote (via test_file_structure fixture)
    2. Pull files locally
    3. Test logging using the pulled files

    :param humanloop_client: The Humanloop client with local files enabled
    :param test_file_structure: List of test files created in remote
    :param cleanup_local_files: Fixture to clean up local files after test
    """
    # First pull the files locally
    humanloop_client.pull()

    # Exercise the first created file
    target = test_file_structure[0]
    local_path = Path("humanloop") / f"{target.path}.{target.type}"

    # Verify the file was pulled correctly
    assert local_path.exists(), f"Expected pulled file at {local_path}"
    assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"

    messages = [{"role": "user", "content": "Testing"}]

    # Log via the client matching the file type; the local copy should be used
    if target.type == "prompt":
        result = humanloop_client.prompts.log(path=target.path, messages=messages, output="Test response")
        assert result is not None
    elif target.type == "agent":
        result = humanloop_client.agents.log(path=target.path, messages=messages, output="Test response")
        assert result is not None

    # A path with no local file must raise
    with pytest.raises(HumanloopRuntimeError):
        if target.type == "prompt":
            humanloop_client.prompts.log(path="invalid/path", messages=messages, output="Test response")
        elif target.type == "agent":
            humanloop_client.agents.log(path="invalid/path", messages=messages, output="Test response")
+
@pytest.mark.parametrize("humanloop_client", [True], indirect=True)
def test_overload_version_environment_handling(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
    """Test that overload_with_local_files correctly handles version_id and environment parameters.

    Flow:
    1. Create files in remote (via test_file_structure fixture)
    2. Pull files locally
    3. Test that version_id/environment parameters cause remote usage with warning
    """
    # First pull the files locally
    humanloop_client.pull()

    # Test using the pulled files
    test_file = test_file_structure[0]  # Use the first test file
    local_path = Path("humanloop") / f"{test_file.path}.{test_file.type}"

    # Verify the file was pulled correctly
    assert local_path.exists(), f"Expected pulled file at {local_path}"
    assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"

    messages = [{"role": "user", "content": "Testing"}]

    def _call(**overrides):
        # Dispatch the call to the client matching the file type; returns the
        # response (None for unsupported types, which then fails the assert
        # below instead of silently reusing a stale response).
        if test_file.type == "prompt":
            return humanloop_client.prompts.call(path=test_file.path, messages=messages, **overrides)
        elif test_file.type == "agent":
            return humanloop_client.agents.call(path=test_file.path, messages=messages, **overrides)

    # Each override combination must fall back to the remote version and warn.
    for overrides in (
        {"version_id": test_file.version_id},
        {"environment": "production"},
        {"version_id": test_file.version_id, "environment": "staging"},
    ):
        with pytest.warns(UserWarning, match="Ignoring local file.*as version_id or environment was specified"):
            response = _call(**overrides)
        assert response is not None
\ No newline at end of file
From 689880c3ba1ead0843e39b128749a7060d10a35b Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Wed, 7 May 2025 18:33:14 +0100
Subject: [PATCH 03/39] Update poetry.lock
---
poetry.lock | 143 ++++++++++++++++++++++++++++++++++++++++++++--------
1 file changed, 123 insertions(+), 20 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index afa2f8a6..3d6d9b40 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -6,6 +6,7 @@ version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
@@ -17,6 +18,7 @@ version = "0.51.0"
description = "The official Python library for the anthropic API"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "anthropic-0.51.0-py3-none-any.whl", hash = "sha256:b8b47d482c9aa1f81b923555cebb687c2730309a20d01be554730c8302e0f62a"},
{file = "anthropic-0.51.0.tar.gz", hash = "sha256:6f824451277992af079554430d5b2c8ff5bc059cc2c968cdc3f06824437da201"},
@@ -41,6 +43,7 @@ version = "4.9.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"},
{file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"},
@@ -54,7 +57,7 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""}
[package.extras]
doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"]
-test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"]
+test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""]
trio = ["trio (>=0.26.1)"]
[[package]]
@@ -63,18 +66,19 @@ version = "25.3.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"},
{file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"},
]
[package.extras]
-benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"]
-tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
+tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""]
[[package]]
name = "certifi"
@@ -82,6 +86,7 @@ version = "2025.4.26"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
+groups = ["main", "dev"]
files = [
{file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"},
{file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"},
@@ -93,6 +98,7 @@ version = "3.4.2"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
files = [
{file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"},
{file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"},
@@ -188,12 +194,28 @@ files = [
{file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"},
]
+[[package]]
+name = "click"
+version = "8.1.8"
+description = "Composable command line interface toolkit"
+optional = false
+python-versions = ">=3.7"
+groups = ["main"]
+files = [
+ {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"},
+ {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
[[package]]
name = "cohere"
version = "5.15.0"
description = ""
optional = false
python-versions = "<4.0,>=3.9"
+groups = ["dev"]
files = [
{file = "cohere-5.15.0-py3-none-any.whl", hash = "sha256:22ff867c2a6f2fc2b585360c6072f584f11f275ef6d9242bac24e0fa2df1dfb5"},
{file = "cohere-5.15.0.tar.gz", hash = "sha256:e802d4718ddb0bb655654382ebbce002756a3800faac30296cde7f1bdc6ff2cc"},
@@ -216,10 +238,12 @@ version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+groups = ["main", "dev"]
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
+markers = {main = "platform_system == \"Windows\"", dev = "platform_system == \"Windows\" or sys_platform == \"win32\""}
[[package]]
name = "deepdiff"
@@ -227,6 +251,7 @@ version = "8.4.2"
description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "deepdiff-8.4.2-py3-none-any.whl", hash = "sha256:7e39e5b26f3747c54f9d0e8b9b29daab670c3100166b77cc0185d5793121b099"},
{file = "deepdiff-8.4.2.tar.gz", hash = "sha256:5c741c0867ebc7fcb83950ad5ed958369c17f424e14dee32a11c56073f4ee92a"},
@@ -245,6 +270,7 @@ version = "1.2.18"
description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
+groups = ["main"]
files = [
{file = "Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec"},
{file = "deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d"},
@@ -254,7 +280,7 @@ files = [
wrapt = ">=1.10,<2"
[package.extras]
-dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools", "tox"]
+dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"]
[[package]]
name = "distro"
@@ -262,6 +288,7 @@ version = "1.9.0"
description = "Distro - an OS platform information API"
optional = false
python-versions = ">=3.6"
+groups = ["main", "dev"]
files = [
{file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"},
{file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
@@ -273,6 +300,8 @@ version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
+markers = "python_version < \"3.11\""
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
@@ -287,6 +316,7 @@ version = "1.10.0"
description = "Fast read/write of AVRO files"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "fastavro-1.10.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1a9fe0672d2caf0fe54e3be659b13de3cad25a267f2073d6f4b9f8862acc31eb"},
{file = "fastavro-1.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86dd0410770e0c99363788f0584523709d85e57bb457372ec5c285a482c17fe6"},
@@ -333,6 +363,7 @@ version = "3.18.0"
description = "A platform independent file lock."
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de"},
{file = "filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2"},
@@ -341,7 +372,7 @@ files = [
[package.extras]
docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"]
testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"]
-typing = ["typing-extensions (>=4.12.2)"]
+typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""]
[[package]]
name = "fsspec"
@@ -349,6 +380,7 @@ version = "2025.3.2"
description = "File-system specification"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "fsspec-2025.3.2-py3-none-any.whl", hash = "sha256:2daf8dc3d1dfa65b6aa37748d112773a7a08416f6c70d96b264c96476ecaf711"},
{file = "fsspec-2025.3.2.tar.gz", hash = "sha256:e52c77ef398680bbd6a98c0e628fbc469491282981209907bbc8aea76a04fdc6"},
@@ -388,6 +420,7 @@ version = "0.24.0"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "groq-0.24.0-py3-none-any.whl", hash = "sha256:0020e6b0b2b267263c9eb7c318deef13c12f399c6525734200b11d777b00088e"},
{file = "groq-0.24.0.tar.gz", hash = "sha256:e821559de8a77fb81d2585b3faec80ff923d6d64fd52339b33f6c94997d6f7f5"},
@@ -407,6 +440,7 @@ version = "0.16.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"},
{file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
@@ -418,6 +452,8 @@ version = "1.1.0"
description = ""
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
+markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\""
files = [
{file = "hf_xet-1.1.0-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:0322c42551e275fcb7949c083a54a81b2898e50787c9aa74284fcb8d2c58c12c"},
{file = "hf_xet-1.1.0-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:667153a0304ac2debf2af95a8ff7687186f885b493f4cd16344869af270cd110"},
@@ -438,6 +474,7 @@ version = "1.0.9"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"},
{file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"},
@@ -459,6 +496,7 @@ version = "0.28.1"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"},
{file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"},
@@ -471,7 +509,7 @@ httpcore = "==1.*"
idna = "*"
[package.extras]
-brotli = ["brotli", "brotlicffi"]
+brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
@@ -483,6 +521,7 @@ version = "0.4.0"
description = "Consume Server-Sent Event (SSE) messages with HTTPX."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"},
{file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"},
@@ -494,6 +533,7 @@ version = "0.31.1"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.8.0"
+groups = ["main", "dev"]
files = [
{file = "huggingface_hub-0.31.1-py3-none-any.whl", hash = "sha256:43f73124819b48b42d140cbc0d7a2e6bd15b2853b1b9d728d4d55ad1750cac5b"},
{file = "huggingface_hub-0.31.1.tar.gz", hash = "sha256:492bb5f545337aa9e2f59b75ef4c5f535a371e8958a6ce90af056387e67f1180"},
@@ -530,6 +570,7 @@ version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
+groups = ["main", "dev"]
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
@@ -544,6 +585,7 @@ version = "8.6.1"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"},
{file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"},
@@ -553,12 +595,12 @@ files = [
zipp = ">=3.20"
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
perf = ["ipython"]
-test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
+test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
type = ["pytest-mypy"]
[[package]]
@@ -567,6 +609,7 @@ version = "2.1.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"},
{file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"},
@@ -578,6 +621,7 @@ version = "0.9.0"
description = "Fast iterable JSON parser."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "jiter-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:816ec9b60fdfd1fec87da1d7ed46c66c44ffec37ab2ef7de5b147b2fce3fd5ad"},
{file = "jiter-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b1d3086f8a3ee0194ecf2008cf81286a5c3e540d977fa038ff23576c023c0ea"},
@@ -663,6 +707,7 @@ version = "4.23.0"
description = "An implementation of JSON Schema validation for Python"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"},
{file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"},
@@ -684,6 +729,7 @@ version = "2025.4.1"
description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af"},
{file = "jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608"},
@@ -698,6 +744,7 @@ version = "5.1.0"
description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions."
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:eaf4ac5c6ee18ca9232238364d7f2a213278ae5ca97897cafaa123fcc7bb8bec"},
{file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:48f9aa8ccb9ad1d577a16104834ac44ff640d8de8c0caed09a2300df7ce8460a"},
@@ -796,6 +843,7 @@ version = "1.0.1"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.7"
+groups = ["dev"]
files = [
{file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"},
{file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"},
@@ -842,6 +890,7 @@ version = "1.1.0"
description = "Type system extensions for programs checked with the mypy type checker."
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"},
{file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"},
@@ -853,6 +902,7 @@ version = "1.26.4"
description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"},
{file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"},
@@ -898,6 +948,7 @@ version = "1.77.0"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "openai-1.77.0-py3-none-any.whl", hash = "sha256:07706e91eb71631234996989a8ea991d5ee56f0744ef694c961e0824d4f39218"},
{file = "openai-1.77.0.tar.gz", hash = "sha256:897969f927f0068b8091b4b041d1f8175bcf124f7ea31bab418bf720971223bc"},
@@ -924,6 +975,7 @@ version = "1.32.1"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_api-1.32.1-py3-none-any.whl", hash = "sha256:bbd19f14ab9f15f0e85e43e6a958aa4cb1f36870ee62b7fd205783a112012724"},
{file = "opentelemetry_api-1.32.1.tar.gz", hash = "sha256:a5be71591694a4d9195caf6776b055aa702e964d961051a0715d05f8632c32fb"},
@@ -939,6 +991,7 @@ version = "0.53b1"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation-0.53b1-py3-none-any.whl", hash = "sha256:c07850cecfbc51e8b357f56d5886ae5ccaa828635b220d0f5e78f941ea9a83ca"},
{file = "opentelemetry_instrumentation-0.53b1.tar.gz", hash = "sha256:0e69ca2c75727e8a300de671c4a2ec0e86e63a8e906beaa5d6c9f5228e8687e5"},
@@ -956,6 +1009,7 @@ version = "0.40.3"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_anthropic-0.40.3-py3-none-any.whl", hash = "sha256:152a7968d86ade48ffb4df526129def598e8e3eeb1c4fb11a5e6a3bbc94c0fd4"},
{file = "opentelemetry_instrumentation_anthropic-0.40.3.tar.gz", hash = "sha256:5e40a9d3342d800180d29e028f3d1aa5db3e0ec482362a7eef054ee500fb8d4f"},
@@ -973,6 +1027,7 @@ version = "0.40.3"
description = "OpenTelemetry Bedrock instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_bedrock-0.40.3-py3-none-any.whl", hash = "sha256:cc8ea0358f57876ad12bbcbc9b1ace4b97bf9d8d5d7703a7a55135811b0b433a"},
{file = "opentelemetry_instrumentation_bedrock-0.40.3.tar.gz", hash = "sha256:bcb5060060d0ec25bd8c08332eadd23d57ceea1e042fed5e7a7664e5a2fcb817"},
@@ -992,6 +1047,7 @@ version = "0.40.3"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_cohere-0.40.3-py3-none-any.whl", hash = "sha256:d39d058ae5cffe02908c1c242b71dc449d07df71c60d7a37159a898e38c6a15c"},
{file = "opentelemetry_instrumentation_cohere-0.40.3.tar.gz", hash = "sha256:23f6f237f7cdef661549b3f7d02dc8b1c69ce0cbf3e0050c05ce3f443458fe35"},
@@ -1009,6 +1065,7 @@ version = "0.40.3"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_groq-0.40.3-py3-none-any.whl", hash = "sha256:3c3c324ab0b49323f268dc54b60fe06aaee04b5bac0f901a70251d4931b611bc"},
{file = "opentelemetry_instrumentation_groq-0.40.3.tar.gz", hash = "sha256:b246b258d28ac5af429688b9948c37ced88cba4a7f8f99f629f9b42fcbe36e47"},
@@ -1026,6 +1083,7 @@ version = "0.40.3"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_openai-0.40.3-py3-none-any.whl", hash = "sha256:77e55609fef78d1a81a61aeac667b6423d19f3f3936c4a219963fba0559dae44"},
{file = "opentelemetry_instrumentation_openai-0.40.3.tar.gz", hash = "sha256:8e7f260f3c3e25f445281238552c80cbd724c2d61fee4ad9360a86a2e0015114"},
@@ -1044,6 +1102,7 @@ version = "0.40.3"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_replicate-0.40.3-py3-none-any.whl", hash = "sha256:24a3ae27137521a4f4cfa523c36be7cedf2ad57eed9fef5f1bfb2a3e2c8aee9e"},
{file = "opentelemetry_instrumentation_replicate-0.40.3.tar.gz", hash = "sha256:b62dcee6b8afe6dc30ad98b18014fdd1b6851fc5bd6d0c4dc6c40bff16ce407d"},
@@ -1061,6 +1120,7 @@ version = "1.32.1"
description = "OpenTelemetry Python Proto"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_proto-1.32.1-py3-none-any.whl", hash = "sha256:fe56df31033ab0c40af7525f8bf4c487313377bbcfdf94184b701a8ccebc800e"},
{file = "opentelemetry_proto-1.32.1.tar.gz", hash = "sha256:bc6385ccf87768f029371535312071a2d09e6c9ebf119ac17dbc825a6a56ba53"},
@@ -1075,6 +1135,7 @@ version = "1.32.1"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_sdk-1.32.1-py3-none-any.whl", hash = "sha256:bba37b70a08038613247bc42beee5a81b0ddca422c7d7f1b097b32bf1c7e2f17"},
{file = "opentelemetry_sdk-1.32.1.tar.gz", hash = "sha256:8ef373d490961848f525255a42b193430a0637e064dd132fd2a014d94792a092"},
@@ -1091,6 +1152,7 @@ version = "0.53b1"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_semantic_conventions-0.53b1-py3-none-any.whl", hash = "sha256:21df3ed13f035f8f3ea42d07cbebae37020367a53b47f1ebee3b10a381a00208"},
{file = "opentelemetry_semantic_conventions-0.53b1.tar.gz", hash = "sha256:4c5a6fede9de61211b2e9fc1e02e8acacce882204cd770177342b6a3be682992"},
@@ -1106,6 +1168,7 @@ version = "0.4.5"
description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_semantic_conventions_ai-0.4.5-py3-none-any.whl", hash = "sha256:91e5c776d45190cebd88ea1cef021e231b5c04c448f5473fdaeb310f14e62b11"},
{file = "opentelemetry_semantic_conventions_ai-0.4.5.tar.gz", hash = "sha256:15e2540aa807fb6748f1bdc60da933ee2fb2e40f6dec48fde8facfd9e22550d7"},
@@ -1117,6 +1180,7 @@ version = "5.4.1"
description = "Orderly set"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "orderly_set-5.4.1-py3-none-any.whl", hash = "sha256:b5e21d21680bd9ef456885db800c5cb4f76a03879880c0175e1b077fb166fd83"},
{file = "orderly_set-5.4.1.tar.gz", hash = "sha256:a1fb5a4fdc5e234e9e8d8e5c1bbdbc4540f4dfe50d12bf17c8bc5dbf1c9c878d"},
@@ -1128,6 +1192,7 @@ version = "25.0"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"},
{file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"},
@@ -1139,6 +1204,7 @@ version = "2.2.3"
description = "Powerful data structures for data analysis, time series, and statistics"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"},
{file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"},
@@ -1225,6 +1291,7 @@ version = "1.20.2"
description = "parse() is the opposite of format()"
optional = false
python-versions = "*"
+groups = ["main", "dev"]
files = [
{file = "parse-1.20.2-py2.py3-none-any.whl", hash = "sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558"},
{file = "parse-1.20.2.tar.gz", hash = "sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce"},
@@ -1236,6 +1303,7 @@ version = "0.6.4"
description = "Simplifies to build parse types based on the parse module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,>=2.7"
+groups = ["dev"]
files = [
{file = "parse_type-0.6.4-py2.py3-none-any.whl", hash = "sha256:83d41144a82d6b8541127bf212dd76c7f01baff680b498ce8a4d052a7a5bce4c"},
{file = "parse_type-0.6.4.tar.gz", hash = "sha256:5e1ec10440b000c3f818006033372939e693a9ec0176f446d9303e4db88489a6"},
@@ -1246,9 +1314,9 @@ parse = {version = ">=1.18.0", markers = "python_version >= \"3.0\""}
six = ">=1.15"
[package.extras]
-develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0)", "pytest (>=5.0)", "pytest-cov", "pytest-html (>=1.19.0)", "ruff", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0)", "virtualenv (>=20.0.0)", "wheel"]
+develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-cov", "pytest-html (>=1.19.0)", "ruff ; python_version >= \"3.7\"", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0) ; python_version <= \"3.6\"", "virtualenv (>=20.0.0) ; python_version > \"3.6\"", "wheel"]
docs = ["Sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6.0)"]
-testing = ["pytest (<5.0)", "pytest (>=5.0)", "pytest-html (>=1.19.0)"]
+testing = ["pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-html (>=1.19.0)"]
[[package]]
name = "pluggy"
@@ -1256,6 +1324,7 @@ version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
{file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
@@ -1271,6 +1340,7 @@ version = "5.29.4"
description = ""
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "protobuf-5.29.4-cp310-abi3-win32.whl", hash = "sha256:13eb236f8eb9ec34e63fc8b1d6efd2777d062fa6aaa68268fb67cf77f6839ad7"},
{file = "protobuf-5.29.4-cp310-abi3-win_amd64.whl", hash = "sha256:bcefcdf3976233f8a502d265eb65ea740c989bacc6c30a58290ed0e519eb4b8d"},
@@ -1291,6 +1361,7 @@ version = "19.0.1"
description = "Python library for Apache Arrow"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:fc28912a2dc924dddc2087679cc8b7263accc71b9ff025a1362b004711661a69"},
{file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:fca15aabbe9b8355800d923cc2e82c8ef514af321e18b437c3d782aa884eaeec"},
@@ -1345,6 +1416,7 @@ version = "2.11.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"},
{file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"},
@@ -1358,7 +1430,7 @@ typing-inspection = ">=0.4.0"
[package.extras]
email = ["email-validator (>=2.0.0)"]
-timezone = ["tzdata"]
+timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""]
[[package]]
name = "pydantic-core"
@@ -1366,6 +1438,7 @@ version = "2.33.2"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"},
{file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"},
@@ -1477,6 +1550,7 @@ version = "7.4.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
+groups = ["dev"]
files = [
{file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
{file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
@@ -1499,6 +1573,7 @@ version = "0.23.8"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"},
{file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"},
@@ -1517,6 +1592,7 @@ version = "1.7.0"
description = "Adds the ability to retry flaky tests in CI environments"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "pytest_retry-1.7.0-py3-none-any.whl", hash = "sha256:a2dac85b79a4e2375943f1429479c65beb6c69553e7dae6b8332be47a60954f4"},
{file = "pytest_retry-1.7.0.tar.gz", hash = "sha256:f8d52339f01e949df47c11ba9ee8d5b362f5824dff580d3870ec9ae0057df80f"},
@@ -1534,6 +1610,7 @@ version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["dev"]
files = [
{file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
{file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
@@ -1548,6 +1625,7 @@ version = "1.1.0"
description = "Read key-value pairs from a .env file and set them as environment variables"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"},
{file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"},
@@ -1562,6 +1640,7 @@ version = "2025.2"
description = "World timezone definitions, modern and historical"
optional = false
python-versions = "*"
+groups = ["dev"]
files = [
{file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"},
{file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"},
@@ -1573,6 +1652,7 @@ version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
@@ -1635,6 +1715,7 @@ version = "0.36.2"
description = "JSON Referencing + Python"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"},
{file = "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"},
@@ -1651,6 +1732,7 @@ version = "2024.11.6"
description = "Alternative regular expression module, to replace re."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"},
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"},
@@ -1754,6 +1836,7 @@ version = "1.0.6"
description = "Python client for Replicate"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "replicate-1.0.6-py3-none-any.whl", hash = "sha256:d544f837dc7e9dc3b3c1df60a145c7d6f362d6719b719793a44a4be28837103d"},
{file = "replicate-1.0.6.tar.gz", hash = "sha256:b8a0f1649ed4146c3d624e22a418b8c6decce9346cffc110c90fde5995c46e60"},
@@ -1771,6 +1854,7 @@ version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
@@ -1792,6 +1876,7 @@ version = "0.24.0"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "rpds_py-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:006f4342fe729a368c6df36578d7a348c7c716be1da0a1a0f86e3021f8e98724"},
{file = "rpds_py-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2d53747da70a4e4b17f559569d5f9506420966083a31c5fbd84e764461c4444b"},
@@ -1915,6 +2000,7 @@ version = "0.5.7"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
+groups = ["dev"]
files = [
{file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"},
{file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"},
@@ -1942,6 +2028,7 @@ version = "1.17.0"
description = "Python 2 and 3 compatibility utilities"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["dev"]
files = [
{file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"},
{file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"},
@@ -1953,6 +2040,7 @@ version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
@@ -1964,6 +2052,7 @@ version = "0.9.0"
description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"},
{file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"},
@@ -2011,6 +2100,7 @@ version = "0.21.1"
description = ""
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "tokenizers-0.21.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:e78e413e9e668ad790a29456e677d9d3aa50a9ad311a40905d6861ba7692cf41"},
{file = "tokenizers-0.21.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:cd51cd0a91ecc801633829fcd1fda9cf8682ed3477c6243b9a095539de4aecf3"},
@@ -2043,6 +2133,8 @@ version = "2.2.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
+markers = "python_version < \"3.11\""
files = [
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
@@ -2084,6 +2176,7 @@ version = "4.67.1"
description = "Fast, Extensible Progress Meter"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
files = [
{file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"},
{file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"},
@@ -2105,6 +2198,7 @@ version = "4.23.0.20241208"
description = "Typing stubs for jsonschema"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "types_jsonschema-4.23.0.20241208-py3-none-any.whl", hash = "sha256:87934bd9231c99d8eff94cacfc06ba668f7973577a9bd9e1f9de957c5737313e"},
{file = "types_jsonschema-4.23.0.20241208.tar.gz", hash = "sha256:e8b15ad01f290ecf6aea53f93fbdf7d4730e4600313e89e8a7f95622f7e87b7c"},
@@ -2119,6 +2213,7 @@ version = "5.29.1.20250403"
description = "Typing stubs for protobuf"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "types_protobuf-5.29.1.20250403-py3-none-any.whl", hash = "sha256:c71de04106a2d54e5b2173d0a422058fae0ef2d058d70cf369fb797bf61ffa59"},
{file = "types_protobuf-5.29.1.20250403.tar.gz", hash = "sha256:7ff44f15022119c9d7558ce16e78b2d485bf7040b4fadced4dd069bb5faf77a2"},
@@ -2130,6 +2225,7 @@ version = "2.9.0.20241206"
description = "Typing stubs for python-dateutil"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = "sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53"},
{file = "types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb"},
@@ -2141,6 +2237,7 @@ version = "2.32.0.20250328"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "types_requests-2.32.0.20250328-py3-none-any.whl", hash = "sha256:72ff80f84b15eb3aa7a8e2625fffb6a93f2ad5a0c20215fc1dcfa61117bcb2a2"},
{file = "types_requests-2.32.0.20250328.tar.gz", hash = "sha256:c9e67228ea103bd811c96984fac36ed2ae8da87a36a633964a21f199d60baf32"},
@@ -2155,6 +2252,7 @@ version = "4.13.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"},
{file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"},
@@ -2166,6 +2264,7 @@ version = "0.4.0"
description = "Runtime typing introspection tools"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"},
{file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"},
@@ -2180,6 +2279,7 @@ version = "2025.2"
description = "Provider of IANA time zone data"
optional = false
python-versions = ">=2"
+groups = ["dev"]
files = [
{file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"},
{file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"},
@@ -2191,13 +2291,14 @@ version = "2.4.0"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"},
{file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"},
]
[package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
@@ -2208,6 +2309,7 @@ version = "1.17.2"
description = "Module for decorators, wrappers and monkey patching."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"},
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"},
@@ -2296,20 +2398,21 @@ version = "3.21.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"},
{file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"},
]
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
-test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
+test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
type = ["pytest-mypy"]
[metadata]
-lock-version = "2.0"
+lock-version = "2.1"
python-versions = ">=3.9,<4"
-content-hash = "6b18fb6088ede49c2e52a1103a46481d57959171b5f2f6ee13cc3089a3804f5d"
+content-hash = "e691867001d491fb99a1c01a92235d2653daf49b1833b83d70defc13c3c843f1"
From 68d75bef4fad856d51bf29507b48afb47fa10f25 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Wed, 7 May 2025 18:39:15 +0100
Subject: [PATCH 04/39] Remove trailing slashes in .fernignore for consistency
---
.fernignore | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.fernignore b/.fernignore
index d52ed17e..e76c1b08 100644
--- a/.fernignore
+++ b/.fernignore
@@ -14,11 +14,11 @@ README.md
src/humanloop/decorators
src/humanloop/otel
src/humanloop/sync
-src/humanloop/cli/
+src/humanloop/cli
## Tests
-tests/
+tests
## CI
From 5be41cb36be06237d0a6265a62292ebf4f3e4926 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Wed, 7 May 2025 19:11:04 +0100
Subject: [PATCH 05/39] Move python-dotenv from dev to runtime dependencies
Sync client requires dotenv at runtime
---
poetry.lock | 4 ++--
pyproject.toml | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/poetry.lock b/poetry.lock
index 3d6d9b40..29a15f63 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1625,7 +1625,7 @@ version = "1.1.0"
description = "Read key-value pairs from a .env file and set them as environment variables"
optional = false
python-versions = ">=3.9"
-groups = ["dev"]
+groups = ["main"]
files = [
{file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"},
{file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"},
@@ -2415,4 +2415,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.9,<4"
-content-hash = "e691867001d491fb99a1c01a92235d2653daf49b1833b83d70defc13c3c843f1"
+content-hash = "0c9e19c602590fba318f4385529614dae82433689f41a0cb24e386a2ec0f7aaa"
diff --git a/pyproject.toml b/pyproject.toml
index 8dc69a73..acf6cc10 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -58,6 +58,7 @@ pydantic = ">= 1.9.2"
pydantic-core = "^2.18.2"
typing_extensions = ">= 4.0.0"
click = "^8.0.0"
+python-dotenv = "^1.1.0"
[tool.poetry.group.dev.dependencies]
mypy = "1.0.1"
@@ -75,7 +76,6 @@ pandas = "^2.2.0"
parse-type = ">=0.6.4"
pyarrow = "^19.0.0"
pytest-retry = "^1.6.3"
-python-dotenv = "^1.0.1"
replicate = "^1.0.3"
ruff = "^0.5.6"
types-jsonschema = "^4.23.0.20240813"
From 53bdacc3ad7f6af204527383afd456f42d250885 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 8 May 2025 21:21:23 +0100
Subject: [PATCH 06/39] Move tests to internal directory; remove metadata
handler; remove progress spinner
---
.fernignore | 2 +-
pyproject.toml | 2 +-
src/humanloop/cli/__main__.py | 59 +--------
src/humanloop/cli/progress.py | 120 -----------------
src/humanloop/sync/metadata_handler.py | 125 ------------------
src/humanloop/sync/sync_client.py | 65 +++------
tests/custom/test_client.py | 7 -
tests/{integration => internal}/__init__.py | 0
tests/{ => internal}/assets/exact_match.py | 0
tests/{ => internal}/assets/levenshtein.py | 0
tests/{ => internal}/conftest.py | 0
.../integration}/__init__.py | 0
tests/{ => internal}/integration/conftest.py | 0
.../integration/test_decorators.py | 0
.../{ => internal}/integration/test_evals.py | 0
tests/internal/otel/__init__.py | 0
tests/{ => internal}/otel/test_helpers.py | 0
tests/internal/sync/__init__.py | 0
tests/{ => internal}/sync/test_sync.py | 0
19 files changed, 25 insertions(+), 355 deletions(-)
delete mode 100644 src/humanloop/cli/progress.py
delete mode 100644 src/humanloop/sync/metadata_handler.py
delete mode 100644 tests/custom/test_client.py
rename tests/{integration => internal}/__init__.py (100%)
rename tests/{ => internal}/assets/exact_match.py (100%)
rename tests/{ => internal}/assets/levenshtein.py (100%)
rename tests/{ => internal}/conftest.py (100%)
rename tests/{otel => internal/integration}/__init__.py (100%)
rename tests/{ => internal}/integration/conftest.py (100%)
rename tests/{ => internal}/integration/test_decorators.py (100%)
rename tests/{ => internal}/integration/test_evals.py (100%)
create mode 100644 tests/internal/otel/__init__.py
rename tests/{ => internal}/otel/test_helpers.py (100%)
create mode 100644 tests/internal/sync/__init__.py
rename tests/{ => internal}/sync/test_sync.py (100%)
diff --git a/.fernignore b/.fernignore
index e76c1b08..176a2fc7 100644
--- a/.fernignore
+++ b/.fernignore
@@ -18,7 +18,7 @@ src/humanloop/cli
## Tests
-tests
+tests/internal
## CI
diff --git a/pyproject.toml b/pyproject.toml
index acf6cc10..6825e6fc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,7 +8,7 @@ authors = []
# The metadata here is used during development but not published to PyPI
[tool.poetry]
name = "humanloop"
-version = "0.8.36b1"
+version = "0.8.36b2"
description = "Humanloop Python SDK"
readme = "README.md"
authors = []
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index ae5d1b43..bb10039a 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -9,7 +9,6 @@
from humanloop import Humanloop
from humanloop.sync.sync_client import SyncClient
from datetime import datetime
-from humanloop.cli.progress import progress_context
# Set up logging
logger = logging.getLogger(__name__)
@@ -166,12 +165,7 @@ def pull(
click.echo(click.style(f"Path: {path or '(root)'}", fg=INFO_COLOR))
click.echo(click.style(f"Environment: {environment or '(default)'}", fg=INFO_COLOR))
- if verbose:
- # Don't use the spinner in verbose mode as the spinner and sync client logging compete
- successful_files = sync_client.pull(path, environment)
- else:
- with progress_context("Pulling files..."):
- successful_files = sync_client.pull(path, environment)
+ successful_files = sync_client.pull(path, environment)
# Get metadata about the operation
metadata = sync_client.metadata.get_last_operation()
@@ -201,56 +195,7 @@ def pull(
click.echo(click.style(f" ✗ {file}", fg=ERROR_COLOR))
if metadata.get('error'):
click.echo(click.style(f"\nError: {metadata['error']}", fg=ERROR_COLOR))
-
-def format_timestamp(timestamp: str) -> str:
- """Format timestamp to a more readable format."""
- try:
- dt = datetime.fromisoformat(timestamp.replace('Z', '+00:00'))
- return dt.strftime('%Y-%m-%d %H:%M:%S')
- except (ValueError, AttributeError):
- return timestamp
-
-@cli.command()
-@click.option(
- "--oneline",
- is_flag=True,
- help="Display history in a single line per operation",
-)
-@handle_sync_errors
-@common_options
-def history(api_key: Optional[str], env_file: Optional[str], base_dir: str, base_url: Optional[str], oneline: bool):
- """Show sync operation history."""
- client = get_client(api_key, env_file, base_url)
- sync_client = SyncClient(client, base_dir=base_dir)
-
- history = sync_client.metadata.get_history()
- if not history:
- click.echo(click.style("No sync operations found in history.", fg=WARNING_COLOR))
- return
-
- if not oneline:
- click.echo(click.style("Sync Operation History:", fg=INFO_COLOR))
- click.echo(click.style("======================", fg=INFO_COLOR))
-
- for op in history:
- if oneline:
- # Format: timestamp | operation_type | path | environment | duration_ms | status
- status = click.style("✓", fg=SUCCESS_COLOR) if not op['failed_files'] else click.style("✗", fg=ERROR_COLOR)
- click.echo(f"{format_timestamp(op['timestamp'])} | {op['operation_type']} | {op['path'] or '(root)'} | {op['environment'] or '-'} | {op['duration_ms']}ms | {status}")
- else:
- click.echo(click.style(f"\nOperation: {op['operation_type']}", fg=INFO_COLOR))
- click.echo(f"Timestamp: {format_timestamp(op['timestamp'])}")
- click.echo(f"Path: {op['path'] or '(root)'}")
- if op['environment']:
- click.echo(f"Environment: {op['environment']}")
- click.echo(f"Duration: {op['duration_ms']}ms")
- if op['successful_files']:
- click.echo(click.style(f"Successfully {op['operation_type']}ed {len(op['successful_files'])} file{'' if len(op['successful_files']) == 1 else 's'}", fg=SUCCESS_COLOR))
- if op['failed_files']:
- click.echo(click.style(f"Failed to {op['operation_type']}ed {len(op['failed_files'])} file{'' if len(op['failed_files']) == 1 else 's'}", fg=ERROR_COLOR))
- if op['error']:
- click.echo(click.style(f"Error: {op['error']}", fg=ERROR_COLOR))
- click.echo(click.style("----------------------", fg=INFO_COLOR))
+
if __name__ == "__main__":
cli()
\ No newline at end of file
diff --git a/src/humanloop/cli/progress.py b/src/humanloop/cli/progress.py
deleted file mode 100644
index 67ef4506..00000000
--- a/src/humanloop/cli/progress.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import sys
-import time
-from typing import Optional, Callable, Any
-from threading import Thread, Event
-from contextlib import contextmanager
-
-class Spinner:
- """A simple terminal spinner for indicating progress."""
-
- def __init__(
- self,
- message: str = "Loading...",
- delay: float = 0.1,
- spinner_chars: str = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
- ):
- self.message = message
- self.delay = delay
- self.spinner_chars = spinner_chars
- self.stop_event = Event()
- self.spinner_thread: Optional[Thread] = None
-
- def _spin(self):
- """The actual spinner animation."""
- i = 0
- while not self.stop_event.is_set():
- sys.stdout.write(f"\r{self.spinner_chars[i]} {self.message}")
- sys.stdout.flush()
- i = (i + 1) % len(self.spinner_chars)
- time.sleep(self.delay)
-
- def start(self):
- """Start the spinner animation."""
- self.stop_event.clear()
- self.spinner_thread = Thread(target=self._spin)
- self.spinner_thread.daemon = True
- self.spinner_thread.start()
-
- def stop(self, final_message: Optional[str] = None):
- """Stop the spinner and optionally display a final message."""
- if self.spinner_thread is None:
- return
-
- self.stop_event.set()
- self.spinner_thread.join()
-
- # Clear the spinner line
- sys.stdout.write("\r" + " " * (len(self.message) + 2) + "\r")
-
- if final_message:
- print(final_message)
- sys.stdout.flush()
-
- def update_message(self, message: str):
- """Update the spinner message."""
- self.message = message
-
-class ProgressTracker:
- """A simple progress tracker that shows percentage completion."""
-
- def __init__(
- self,
- total: int,
- message: str = "Progress",
- width: int = 40
- ):
- self.total = total
- self.current = 0
- self.message = message
- self.width = width
- self.start_time = time.time()
-
- def update(self, increment: int = 1):
- """Update the progress."""
- self.current += increment
- self._display()
-
- def _display(self):
- """Display the current progress."""
- percentage = (self.current / self.total) * 100
- filled = int(self.width * self.current / self.total)
- bar = "█" * filled + "░" * (self.width - filled)
-
- elapsed = time.time() - self.start_time
- if self.current > 0:
- rate = elapsed / self.current
- eta = rate * (self.total - self.current)
- time_str = f"ETA: {eta:.1f}s"
- else:
- time_str = "Calculating..."
-
- sys.stdout.write(f"\r{self.message}: [{bar}] {percentage:.1f}% {time_str}")
- sys.stdout.flush()
-
- def finish(self, final_message: Optional[str] = None):
- """Complete the progress bar and optionally show a final message."""
- self._display()
- print() # New line
- if final_message:
- print(final_message)
-
-@contextmanager
-def progress_context(message: str = "Loading...", success_message: str | None = None, error_message: str | None = None):
- """Context manager for showing a spinner during an operation."""
- spinner = Spinner(message)
- spinner.start()
- try:
- yield spinner
- spinner.stop(success_message)
- except Exception as e:
- spinner.stop(error_message)
- raise
-
-def with_progress(message: str = "Loading..."):
- """Decorator to add a spinner to a function."""
- def decorator(func: Callable):
- def wrapper(*args, **kwargs):
- with progress_context(message) as spinner:
- return func(*args, **kwargs)
- return wrapper
- return decorator
\ No newline at end of file
diff --git a/src/humanloop/sync/metadata_handler.py b/src/humanloop/sync/metadata_handler.py
deleted file mode 100644
index 18de2e8a..00000000
--- a/src/humanloop/sync/metadata_handler.py
+++ /dev/null
@@ -1,125 +0,0 @@
-import json
-import time
-from datetime import datetime
-from pathlib import Path
-from typing import Dict, List, Optional, TypedDict, NotRequired
-import logging
-
-logger = logging.getLogger(__name__)
-
-class OperationData(TypedDict):
- """Type definition for operation data structure."""
- timestamp: str
- operation_type: str
- path: str
- environment: NotRequired[Optional[str]]
- successful_files: List[str]
- failed_files: List[str]
- error: NotRequired[Optional[str]]
- duration_ms: float
-
-class Metadata(TypedDict):
- """Type definition for the metadata structure."""
- last_operation: Optional[OperationData]
- history: List[OperationData]
-
-class MetadataHandler:
- """Handles metadata storage and retrieval for sync operations.
-
- This class manages a JSON file that stores the last 5 sync operations
- and maintains a record of the most recent operation with detailed information.
- """
-
- def __init__(self, base_dir: Path, max_history: int = 5) -> None:
- """Initialize the metadata handler.
-
- Args:
- base_dir: Base directory where metadata will be stored
- max_history: Maximum number of operations to keep in history
- """
- self.base_dir = base_dir
- self.metadata_file = base_dir / ".sync_metadata.json"
- self.max_history = max_history
- self._ensure_metadata_file()
-
- def _ensure_metadata_file(self) -> None:
- """Ensure the metadata file exists with proper structure."""
- if not self.metadata_file.exists():
- initial_data: Metadata = {
- "last_operation": None,
- "history": []
- }
- self._write_metadata(initial_data)
-
- def _read_metadata(self) -> Metadata:
- """Read the current metadata from file."""
- try:
- with open(self.metadata_file, 'r') as f:
- return json.load(f)
- except Exception as e:
- logger.error(f"Error reading metadata file: {e}")
- return {"last_operation": None, "history": []}
-
- def _write_metadata(self, data: Metadata) -> None:
- """Write metadata to file."""
- try:
- self.metadata_file.parent.mkdir(parents=True, exist_ok=True)
- with open(self.metadata_file, 'w') as f:
- json.dump(data, f, indent=2)
- except Exception as e:
- logger.error(f"Error writing metadata file: {e}")
-
- def log_operation(
- self,
- operation_type: str,
- path: str,
- duration_ms: float,
- environment: Optional[str] = None,
- successful_files: Optional[List[str]] = None,
- failed_files: Optional[List[str]] = None,
- error: Optional[str] = None,
- ) -> None:
- """Log a sync operation.
-
- Args:
- operation_type: Type of operation (e.g., "pull", "push")
- path: The path that was synced
- duration_ms: Duration of the operation in milliseconds
- environment: Optional environment name
- successful_files: List of successfully processed files
- failed_files: List of files that failed to process
- error: Any error message if the operation failed
- """
- current_time = datetime.now().isoformat()
-
- operation_data: OperationData = {
- "timestamp": current_time,
- "operation_type": operation_type,
- "path": path,
- "environment": environment,
- "successful_files": successful_files or [],
- "failed_files": failed_files or [],
- "error": error,
- "duration_ms": duration_ms
- }
-
- metadata = self._read_metadata()
-
- # Update last operation
- metadata["last_operation"] = operation_data
-
- # Update history
- metadata["history"].insert(0, operation_data)
- metadata["history"] = metadata["history"][:self.max_history]
-
- self._write_metadata(metadata)
-
- def get_last_operation(self) -> Optional[OperationData]:
- """Get the most recent operation details."""
- metadata = self._read_metadata()
- return metadata.get("last_operation")
-
- def get_history(self) -> List[OperationData]:
- """Get the operation history."""
- metadata = self._read_metadata()
- return metadata.get("history", [])
\ No newline at end of file
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index 6b16cde0..775e054b 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -3,7 +3,6 @@
from typing import List, TYPE_CHECKING, Optional
from functools import lru_cache
from humanloop.types import FileType
-from .metadata_handler import MetadataHandler
import time
from humanloop.error import HumanloopRuntimeError
import json
@@ -81,8 +80,6 @@ def __init__(
# Create a new cached version of get_file_content with the specified cache size
self.get_file_content = lru_cache(maxsize=cache_size)(self._get_file_content_impl)
- # Initialize metadata handler
- self.metadata = MetadataHandler(self.base_dir)
def _get_file_content_impl(self, path: str, file_type: FileType) -> str:
"""Implementation of get_file_content without the cache decorator.
@@ -283,45 +280,25 @@ def pull(self, path: str | None = None, environment: str | None = None) -> List[
normalized_path = self._normalize_path(path) if path else None
logger.info(f"Starting pull operation: path={normalized_path or '(root)'}, environment={environment or '(default)'}")
- try:
- if path is None:
- # Pull all files from the root
- logger.debug("Pulling all files from root")
- successful_files = self._pull_directory(None, environment)
- failed_files = [] # Failed files are already logged in _pull_directory
+
+ if path is None:
+ # Pull all files from the root
+ logger.debug("Pulling all files from root")
+ successful_files = self._pull_directory(None, environment)
+ failed_files = [] # Failed files are already logged in _pull_directory
+ else:
+ if self.is_file(path.strip()):
+ logger.debug(f"Pulling specific file: {normalized_path}")
+ self._pull_file(normalized_path, environment)
+ successful_files = [path]
+ failed_files = []
else:
- if self.is_file(path.strip()):
- logger.debug(f"Pulling specific file: {normalized_path}")
- self._pull_file(normalized_path, environment)
- successful_files = [path]
- failed_files = []
- else:
- logger.debug(f"Pulling directory: {normalized_path}")
- successful_files = self._pull_directory(normalized_path, environment)
- failed_files = [] # Failed files are already logged in _pull_directory
-
- duration_ms = int((time.time() - start_time) * 1000)
- logger.info(f"Pull completed in {duration_ms}ms: {len(successful_files)} files succeeded")
-
- # Log the successful operation
- self.metadata.log_operation(
- operation_type="pull",
- path=normalized_path or "", # Use empty string if path is None
- environment=environment,
- successful_files=successful_files,
- failed_files=failed_files,
- duration_ms=duration_ms
- )
-
- return successful_files
- except Exception as e:
- duration_ms = int((time.time() - start_time) * 1000)
- # Log the failed operation
- self.metadata.log_operation(
- operation_type="pull",
- path=normalized_path or "", # Use empty string if path is None
- environment=environment,
- error=str(e),
- duration_ms=duration_ms
- )
- raise
+ logger.debug(f"Pulling directory: {normalized_path}")
+ successful_files = self._pull_directory(normalized_path, environment)
+ failed_files = [] # Failed files are already logged in _pull_directory
+
+ duration_ms = int((time.time() - start_time) * 1000)
+ logger.info(f"Pull completed in {duration_ms}ms: {len(successful_files)} files succeeded")
+
+ return successful_files
+
diff --git a/tests/custom/test_client.py b/tests/custom/test_client.py
deleted file mode 100644
index 3e7c8334..00000000
--- a/tests/custom/test_client.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import pytest
-
-
-# Get started with writing tests with pytest at https://docs.pytest.org
-@pytest.mark.skip(reason="Unimplemented")
-def test_client() -> None:
- assert True is True
diff --git a/tests/integration/__init__.py b/tests/internal/__init__.py
similarity index 100%
rename from tests/integration/__init__.py
rename to tests/internal/__init__.py
diff --git a/tests/assets/exact_match.py b/tests/internal/assets/exact_match.py
similarity index 100%
rename from tests/assets/exact_match.py
rename to tests/internal/assets/exact_match.py
diff --git a/tests/assets/levenshtein.py b/tests/internal/assets/levenshtein.py
similarity index 100%
rename from tests/assets/levenshtein.py
rename to tests/internal/assets/levenshtein.py
diff --git a/tests/conftest.py b/tests/internal/conftest.py
similarity index 100%
rename from tests/conftest.py
rename to tests/internal/conftest.py
diff --git a/tests/otel/__init__.py b/tests/internal/integration/__init__.py
similarity index 100%
rename from tests/otel/__init__.py
rename to tests/internal/integration/__init__.py
diff --git a/tests/integration/conftest.py b/tests/internal/integration/conftest.py
similarity index 100%
rename from tests/integration/conftest.py
rename to tests/internal/integration/conftest.py
diff --git a/tests/integration/test_decorators.py b/tests/internal/integration/test_decorators.py
similarity index 100%
rename from tests/integration/test_decorators.py
rename to tests/internal/integration/test_decorators.py
diff --git a/tests/integration/test_evals.py b/tests/internal/integration/test_evals.py
similarity index 100%
rename from tests/integration/test_evals.py
rename to tests/internal/integration/test_evals.py
diff --git a/tests/internal/otel/__init__.py b/tests/internal/otel/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/otel/test_helpers.py b/tests/internal/otel/test_helpers.py
similarity index 100%
rename from tests/otel/test_helpers.py
rename to tests/internal/otel/test_helpers.py
diff --git a/tests/internal/sync/__init__.py b/tests/internal/sync/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/sync/test_sync.py b/tests/internal/sync/test_sync.py
similarity index 100%
rename from tests/sync/test_sync.py
rename to tests/internal/sync/test_sync.py
From 4bb96939db1f4a96608a0417d59f67174871fc90 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 8 May 2025 21:23:16 +0100
Subject: [PATCH 07/39] Document in CLI get_client docstring that it raises
 when no API key is found
---
src/humanloop/cli/__main__.py | 8 +++++++-
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index bb10039a..bb8fe3db 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -28,7 +28,13 @@
MAX_FILES_TO_DISPLAY = 10
def get_client(api_key: Optional[str] = None, env_file: Optional[str] = None, base_url: Optional[str] = None) -> Humanloop:
- """Get a Humanloop client instance."""
+ """Get a Humanloop client instance.
+
+ If no API key is provided, it will be loaded from the .env file, or the environment variable HUMANLOOP_API_KEY.
+
+ Raises:
+ click.ClickException: If no API key is found.
+ """
if not api_key:
if env_file:
load_dotenv(env_file)
From 01f0550ed707be9566891edb97dc8a022714e168 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 8 May 2025 21:24:27 +0100
Subject: [PATCH 08/39] docs: improve clarity in handle_sync_errors decorator
docstring
---
src/humanloop/cli/__main__.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index bb8fe3db..30c7fcb9 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -88,7 +88,10 @@ def wrapper(*args, **kwargs):
return wrapper
def handle_sync_errors(f: Callable) -> Callable:
- """Decorator for handling sync operation errors."""
+ """Decorator for handling sync operation errors.
+
+ If an error occurs in any operation that uses this decorator, it will be logged and the program will exit with a non-zero exit code.
+ """
@wraps(f)
def wrapper(*args, **kwargs):
try:
From aa546ad0c2668ca0715fefc8ab7bf8a8d1f08eba Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 8 May 2025 21:26:00 +0100
Subject: [PATCH 09/39] Clarify why cli method does nothing
---
src/humanloop/cli/__main__.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index 30c7fcb9..7e6444da 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -108,7 +108,7 @@ def wrapper(*args, **kwargs):
"max_content_width": 100,
}
)
-def cli():
+def cli(): # Does nothing because used as a group for other subcommands (pull, push, etc.)
"""Humanloop CLI for managing sync operations."""
pass
From 861e45f01ca36192d552d377cc5b28c5168825ac Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 8 May 2025 21:29:21 +0100
Subject: [PATCH 10/39] docs: improve help for path option in pull operation in
cli
---
src/humanloop/cli/__main__.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index 7e6444da..120e78bd 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -116,9 +116,8 @@ def cli(): # Does nothing because used as a group for other subcommands (pull, p
@click.option(
"--path",
"-p",
- help="Path to pull (file or directory). If not provided, pulls everything. "
- "To pull a specific file, ensure the extension for the file is included (e.g. .prompt or .agent). "
- "To pull a directory, simply specify the path to the directory (e.g. abc/def to pull all files under abc/def and its subdirectories).",
+ help="Path in the Humanloop workspace to pull from (file or directory). You can pull an entire directory (e.g. 'my/directory') "
+ "or a specific file (e.g. 'my/directory/my_prompt.prompt'). When pulling a directory, all files within that directory and its subdirectories will be included.",
default=None,
)
@click.option(
From a6964d8a540d4083c464e02adb418250df7cb458 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 8 May 2025 21:34:11 +0100
Subject: [PATCH 11/39] docs: improve help for pull command in cli
---
src/humanloop/cli/__main__.py | 28 ++++++++++++++++------------
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index 120e78bd..b66c2e87 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -147,7 +147,7 @@ def pull(
\b
This command will:
- 1. Fetch prompt and agent files from your Humanloop workspace
+ 1. Fetch Prompt and Agent files from your Humanloop workspace
2. Save them to your local filesystem
3. Maintain the same directory structure as in Humanloop
4. Add appropriate file extensions (.prompt or .agent)
@@ -155,17 +155,21 @@ def pull(
\b
The files will be saved with the following structure:
{base_dir}/
- ├── prompts/
- │ ├── my_prompt.prompt
- │ └── nested/
- │ └── another_prompt.prompt
- └── agents/
- └── my_agent.agent
-
- The operation will overwrite existing files with the latest version from Humanloop
- but will not delete local files that don't exist in the remote workspace.
-
- Currently only supports syncing prompt and agent files. Other file types will be skipped."""
+ ├── my_project/
+ │ ├── prompts/
+ │ │ ├── my_prompt.prompt
+ │ │ └── nested/
+ │ │ └── another_prompt.prompt
+ │ └── agents/
+ │ └── my_agent.agent
+ └── another_project/
+ └── prompts/
+ └── other_prompt.prompt
+
+ If a file exists both locally and in the Humanloop workspace, the local file will be overwritten
+ with the version from Humanloop. Files that only exist locally will not be affected.
+
+ Currently only supports syncing Prompt and Agent files. Other file types will be skipped."""
client = get_client(api_key, env_file, base_url)
sync_client = SyncClient(client, base_dir=base_dir, log_level=logging.DEBUG if verbose else logging.WARNING)
From 5e51de07518dfba78504ad1593a366b94877bda8 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 8 May 2025 21:47:20 +0100
Subject: [PATCH 12/39] Clean up error handling in sync client
---
src/humanloop/sync/sync_client.py | 104 ++++++++++++++++++++----------
1 file changed, 69 insertions(+), 35 deletions(-)
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index 775e054b..846fe632 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -1,6 +1,6 @@
import logging
from pathlib import Path
-from typing import List, TYPE_CHECKING, Optional
+from typing import List, Tuple, TYPE_CHECKING
from functools import lru_cache
from humanloop.types import FileType
import time
@@ -189,24 +189,49 @@ def _save_serialized_file(self, serialized_content: str, file_path: str, file_ty
logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}")
raise
- def _pull_file(self, path: str, environment: str | None = None) -> None:
- """Pull a specific file from Humanloop to local filesystem."""
- file = self.client.files.retrieve_by_path(
- path=path,
- environment=environment,
- include_raw_file_content=True
- )
+ def _pull_file(self, path: str, environment: str | None = None) -> bool:
+ """Pull a specific file from Humanloop to local filesystem.
+
+ Returns:
+ True if the file was successfully pulled, False otherwise
+
+ Raises:
+ HumanloopRuntimeError: If there's an error communicating with the API
+ """
+ try:
+ file = self.client.files.retrieve_by_path(
+ path=path,
+ environment=environment,
+ include_raw_file_content=True
+ )
+ except Exception as e:
+ logger.error(f"Failed to pull file {path}: {format_api_error(e)}")
+ return False
if file.type not in ["prompt", "agent"]:
raise ValueError(f"Unsupported file type: {file.type}")
- self._save_serialized_file(file.raw_file_content, file.path, file.type)
+ try:
+ self._save_serialized_file(file.raw_file_content, file.path, file.type)
+ return True
+ except Exception as e:
+ logger.error(f"Failed to save file {path}: {str(e)}")
+ return False
def _pull_directory(self,
path: str | None = None,
environment: str | None = None,
- ) -> List[str]:
- """Sync Prompt and Agent files from Humanloop to local filesystem."""
+ ) -> Tuple[List[str], List[str]]:
+ """Sync Prompt and Agent files from Humanloop to local filesystem.
+
+ Returns:
+ Tuple of two lists:
+ - First list contains paths of successfully synced files
+ - Second list contains paths of files that failed to sync
+
+ Raises:
+ HumanloopRuntimeError: If there's an error communicating with the API
+ """
successful_files = []
failed_files = []
page = 1
@@ -248,21 +273,21 @@ def _pull_directory(self,
successful_files.append(file.path)
except Exception as e:
failed_files.append(file.path)
- logger.error(f"Task failed for {file.path}: {str(e)}")
+ logger.error(f"Failed to save {file.path}: {str(e)}")
page += 1
except Exception as e:
formatted_error = format_api_error(e)
- raise HumanloopRuntimeError(f"Failed to pull files: {formatted_error}")
+ raise HumanloopRuntimeError(f"Failed to fetch page {page}: {formatted_error}")
if successful_files:
logger.info(f"Successfully pulled {len(successful_files)} files")
if failed_files:
logger.warning(f"Failed to pull {len(failed_files)} files")
- return successful_files
+ return successful_files, failed_files
- def pull(self, path: str | None = None, environment: str | None = None) -> List[str]:
+ def pull(self, path: str | None = None, environment: str | None = None) -> Tuple[List[str], List[str]]:
"""Pull files from Humanloop to local filesystem.
If the path ends with .prompt or .agent, pulls that specific file.
@@ -274,31 +299,40 @@ def pull(self, path: str | None = None, environment: str | None = None) -> List[
environment: The environment to pull from
Returns:
- List of successfully processed file paths
+ Tuple of two lists:
+ - First list contains paths of successfully synced files
+ - Second list contains paths of files that failed to sync
+
+ Raises:
+ HumanloopRuntimeError: If there's an error communicating with the API
"""
start_time = time.time()
normalized_path = self._normalize_path(path) if path else None
logger.info(f"Starting pull operation: path={normalized_path or '(root)'}, environment={environment or '(default)'}")
- if path is None:
- # Pull all files from the root
- logger.debug("Pulling all files from root")
- successful_files = self._pull_directory(None, environment)
- failed_files = [] # Failed files are already logged in _pull_directory
- else:
- if self.is_file(path.strip()):
- logger.debug(f"Pulling specific file: {normalized_path}")
- self._pull_file(normalized_path, environment)
- successful_files = [path]
- failed_files = []
+ try:
+ if path is None:
+ # Pull all files from the root
+ logger.debug("Pulling all files from root")
+ successful_files, failed_files = self._pull_directory(None, environment)
else:
- logger.debug(f"Pulling directory: {normalized_path}")
- successful_files = self._pull_directory(normalized_path, environment)
- failed_files = [] # Failed files are already logged in _pull_directory
-
- duration_ms = int((time.time() - start_time) * 1000)
- logger.info(f"Pull completed in {duration_ms}ms: {len(successful_files)} files succeeded")
-
- return successful_files
+ if self.is_file(path.strip()):
+ logger.debug(f"Pulling specific file: {normalized_path}")
+ if self._pull_file(normalized_path, environment):
+ successful_files = [path]
+ failed_files = []
+ else:
+ successful_files = []
+ failed_files = [path]
+ else:
+ logger.debug(f"Pulling directory: {normalized_path}")
+ successful_files, failed_files = self._pull_directory(normalized_path, environment)
+
+ duration_ms = int((time.time() - start_time) * 1000)
+ logger.info(f"Pull completed in {duration_ms}ms: {len(successful_files)} files succeeded")
+
+ return successful_files, failed_files
+ except Exception as e:
+ raise HumanloopRuntimeError(f"Pull operation failed: {str(e)}")
From 59f0f4c7e12d758590bc250d311ff14c8047cc46 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Thu, 8 May 2025 21:50:28 +0100
Subject: [PATCH 13/39] Simplify logging of successful and failed files in CLI
+ add quiet option
---
src/humanloop/cli/__main__.py | 55 ++++++++++++++++-------------------
src/humanloop/client.py | 4 +--
2 files changed, 27 insertions(+), 32 deletions(-)
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index b66c2e87..ac749d80 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -9,6 +9,7 @@
from humanloop import Humanloop
from humanloop.sync.sync_client import SyncClient
from datetime import datetime
+import time
# Set up logging
logger = logging.getLogger(__name__)
@@ -132,6 +133,12 @@ def cli(): # Does nothing because used as a group for other subcommands (pull, p
is_flag=True,
help="Show detailed information about the operation",
)
+@click.option(
+ "--quiet",
+ "-q",
+ is_flag=True,
+ help="Suppress output of successful files",
+)
@handle_sync_errors
@common_options
def pull(
@@ -141,7 +148,8 @@ def pull(
env_file: Optional[str],
base_dir: str,
base_url: Optional[str],
- verbose: bool
+ verbose: bool,
+ quiet: bool
):
"""Pull prompt and agent files from Humanloop to your local filesystem.
@@ -177,37 +185,24 @@ def pull(
click.echo(click.style(f"Path: {path or '(root)'}", fg=INFO_COLOR))
click.echo(click.style(f"Environment: {environment or '(default)'}", fg=INFO_COLOR))
- successful_files = sync_client.pull(path, environment)
+ start_time = time.time()
+ successful_files, failed_files = sync_client.pull(path, environment)
+ duration_ms = int((time.time() - start_time) * 1000)
- # Get metadata about the operation
- metadata = sync_client.metadata.get_last_operation()
- if metadata:
- # Determine if the operation was successful based on failed_files
- is_successful = not metadata.get('failed_files') and not metadata.get('error')
- duration_color = SUCCESS_COLOR if is_successful else ERROR_COLOR
- click.echo(click.style(f"Pull completed in {metadata['duration_ms']}ms", fg=duration_color))
-
- if metadata['successful_files']:
- click.echo(click.style(f"\nSuccessfully pulled {len(metadata['successful_files'])} files:", fg=SUCCESS_COLOR))
-
- if verbose:
- for file in metadata['successful_files']:
- click.echo(click.style(f" ✓ {file}", fg=SUCCESS_COLOR))
- else:
- files_to_display = metadata['successful_files'][:MAX_FILES_TO_DISPLAY]
- for file in files_to_display:
- click.echo(click.style(f" ✓ {file}", fg=SUCCESS_COLOR))
-
- if len(metadata['successful_files']) > MAX_FILES_TO_DISPLAY:
- remaining = len(metadata['successful_files']) - MAX_FILES_TO_DISPLAY
- click.echo(click.style(f" ...and {remaining} more", fg=SUCCESS_COLOR))
- if metadata['failed_files']:
- click.echo(click.style(f"\nFailed to pull {len(metadata['failed_files'])} files:", fg=ERROR_COLOR))
- for file in metadata['failed_files']:
- click.echo(click.style(f" ✗ {file}", fg=ERROR_COLOR))
- if metadata.get('error'):
- click.echo(click.style(f"\nError: {metadata['error']}", fg=ERROR_COLOR))
+ # Determine if the operation was successful based on failed_files
+ is_successful = not failed_files
+ duration_color = SUCCESS_COLOR if is_successful else ERROR_COLOR
+ click.echo(click.style(f"Pull completed in {duration_ms}ms", fg=duration_color))
+
+ if successful_files and not quiet:
+ click.echo(click.style(f"\nSuccessfully pulled {len(successful_files)} files:", fg=SUCCESS_COLOR))
+ for file in successful_files:
+ click.echo(click.style(f" ✓ {file}", fg=SUCCESS_COLOR))
+ if failed_files:
+ click.echo(click.style(f"\nFailed to pull {len(failed_files)} files:", fg=ERROR_COLOR))
+ for file in failed_files:
+ click.echo(click.style(f" ✗ {file}", fg=ERROR_COLOR))
if __name__ == "__main__":
cli()
\ No newline at end of file
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 996b75ad..ee06b6a9 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -1,6 +1,6 @@
import os
import typing
-from typing import Any, List, Optional, Sequence
+from typing import Any, List, Optional, Sequence, Tuple
import httpx
from opentelemetry.sdk.resources import Resource
@@ -390,7 +390,7 @@ def agent():
def pull(self,
environment: str | None = None,
path: str | None = None
- ) -> List[str]:
+ ) -> Tuple[List[str], List[str]]:
"""Pull Prompt and Agent files from Humanloop to local filesystem.
This method will:
From 0e03fcb0bcd40700901e5761be0d18cd5f1f4957 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Fri, 9 May 2025 16:14:26 +0100
Subject: [PATCH 14/39] Address PR feedback
---
src/humanloop/cli/__main__.py | 12 +++----
src/humanloop/overload.py | 10 +++---
src/humanloop/sync/sync_client.py | 59 +++++++++++++------------------
3 files changed, 35 insertions(+), 46 deletions(-)
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index ac749d80..ebf4fd33 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -26,8 +26,6 @@
INFO_COLOR = "blue"
WARNING_COLOR = "yellow"
-MAX_FILES_TO_DISPLAY = 10
-
def get_client(api_key: Optional[str] = None, env_file: Optional[str] = None, base_url: Optional[str] = None) -> Humanloop:
"""Get a Humanloop client instance.
@@ -74,7 +72,7 @@ def common_options(f: Callable) -> Callable:
)
@click.option(
"--base-dir",
- help="Base directory for pulled files",
+ help="Base directory for pulled files (default: humanloop/)",
default="humanloop",
type=click.Path(),
)
@@ -117,7 +115,7 @@ def cli(): # Does nothing because used as a group for other subcommands (pull, p
@click.option(
"--path",
"-p",
- help="Path in the Humanloop workspace to pull from (file or directory). You can pull an entire directory (e.g. 'my/directory') "
+ help="Path in the Humanloop workspace to pull from (file or directory). You can pull an entire directory (e.g. 'my/directory/') "
"or a specific file (e.g. 'my/directory/my_prompt.prompt'). When pulling a directory, all files within that directory and its subdirectories will be included.",
default=None,
)
@@ -151,18 +149,18 @@ def pull(
verbose: bool,
quiet: bool
):
- """Pull prompt and agent files from Humanloop to your local filesystem.
+ """Pull Prompt and Agent files from Humanloop to your local filesystem.
\b
This command will:
1. Fetch Prompt and Agent files from your Humanloop workspace
- 2. Save them to your local filesystem
+ 2. Save them to your local filesystem (default: humanloop/)
3. Maintain the same directory structure as in Humanloop
4. Add appropriate file extensions (.prompt or .agent)
\b
The files will be saved with the following structure:
- {base_dir}/
+ humanloop/
├── my_project/
│ ├── prompts/
│ │ ├── my_prompt.prompt
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index bd409236..64638a65 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -176,13 +176,13 @@ def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
# Handle local files if enabled
if use_local_files and "path" in kwargs:
# Check if version_id or environment is specified
- has_version_info = "version_id" in kwargs or "environment" in kwargs
+ use_remote = any(["version_id" in kwargs, "environment" in kwargs])
normalized_path = sync_client._normalize_path(kwargs["path"])
- if has_version_info:
- logger.warning(
- f"Ignoring local file for `{normalized_path}` as version_id or environment was specified. "
- "Using remote version instead."
+ if use_remote:
+ raise HumanloopRuntimeError(
+ f"Cannot use local file for `{normalized_path}` as version_id or environment was specified. "
+ "Please either remove version_id/environment to use local files, or set use_local_files=False to use remote files."
)
else:
# Only use local file if no version info is specified
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index 846fe632..4d6f9bdb 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -6,6 +6,7 @@
import time
from humanloop.error import HumanloopRuntimeError
import json
+import re
if TYPE_CHECKING:
from humanloop.base_client import BaseHumanloop
@@ -56,6 +57,9 @@ class SyncClient:
manually cleared using the clear_cache() method.
"""
+ # File types that can be serialized to/from the filesystem
+ SERIALIZABLE_FILE_TYPES = ["prompt", "agent"]
+
def __init__(
self,
client: "BaseHumanloop",
@@ -75,7 +79,6 @@ def __init__(
self.base_dir = Path(base_dir)
self._cache_size = cache_size
- global logger
logger.setLevel(log_level)
# Create a new cached version of get_file_content with the specified cache size
@@ -94,7 +97,10 @@ def _get_file_content_impl(self, path: str, file_type: FileType) -> str:
The raw file content
Raises:
- HumanloopRuntimeError: If the file doesn't exist or can't be read
+ HumanloopRuntimeError: In two cases:
+ 1. If the file doesn't exist at the expected location
+ 2. If there's a filesystem error when trying to read the file
+ (e.g., permission denied, file is locked, etc.)
"""
# Construct path to local file
local_path = self.base_dir / path
@@ -137,32 +143,17 @@ def clear_cache(self) -> None:
def _normalize_path(self, path: str) -> str:
"""Normalize the path by:
- 1. Removing any file extensions (.prompt, .agent)
- 2. Converting backslashes to forward slashes
- 3. Removing leading and trailing slashes
- 4. Removing leading and trailing whitespace
- 5. Normalizing multiple consecutive slashes into a single forward slash
-
- Args:
- path: The path to normalize
-
- Returns:
- The normalized path
+ 1. Converting to a Path object to handle platform-specific separators
+ 2. Removing any file extensions
+ 3. Converting to a string with forward slashes and no leading/trailing slashes
"""
- # Remove any file extensions
- path = path.rsplit('.', 1)[0] if '.' in path else path
-
- # Convert backslashes to forward slashes and normalize multiple slashes
- path = path.replace('\\', '/')
+ # Convert to Path object to handle platform-specific separators
+ path_obj = Path(path)
- # Remove leading/trailing whitespace and slashes
- path = path.strip().strip('/')
-
- # Normalize multiple consecutive slashes into a single forward slash
- while '//' in path:
- path = path.replace('//', '/')
-
- return path
+ # Remove extension, convert to string with forward slashes, and remove leading/trailing slashes
+ normalized = str(path_obj.with_suffix(''))
+ # Replace all backslashes and normalize multiple forward slashes
+ return '/'.join(part for part in normalized.replace('\\', '/').split('/') if part)
def is_file(self, path: str) -> bool:
"""Check if the path is a file by checking for .prompt or .agent extension."""
@@ -183,7 +174,7 @@ def _save_serialized_file(self, serialized_content: str, file_path: str, file_ty
with open(new_path, "w") as f:
f.write(serialized_content)
- # Clear the cache for this file to ensure we get fresh content next time
+ # Clear the cache so subsequent reads pick up the newly saved content
self.clear_cache()
except Exception as e:
logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}")
@@ -208,7 +199,7 @@ def _pull_file(self, path: str, environment: str | None = None) -> bool:
logger.error(f"Failed to pull file {path}: {format_api_error(e)}")
return False
- if file.type not in ["prompt", "agent"]:
+ if file.type not in self.SERIALIZABLE_FILE_TYPES:
raise ValueError(f"Unsupported file type: {file.type}")
try:
@@ -240,7 +231,7 @@ def _pull_directory(self,
while True:
try:
- logger.debug(f"Requesting page {page} of files")
+ logger.debug(f"`{path}`: Requesting page {page} of files")
response = self.client.files.list_files(
type=["prompt", "agent"],
page=page,
@@ -250,15 +241,15 @@ def _pull_directory(self,
)
if len(response.records) == 0:
- logger.debug("No more files found")
+ logger.debug(f"Finished reading files for path `{path}`")
break
- logger.debug(f"Found {len(response.records)} files from page {page}")
+ logger.debug(f"`{path}`: Read page {page} containing {len(response.records)} files")
# Process each file
for file in response.records:
# Skip if not a Prompt or Agent
- if file.type not in ["prompt", "agent"]:
+ if file.type not in self.SERIALIZABLE_FILE_TYPES:
logger.warning(f"Skipping unsupported file type: {file.type}")
continue
@@ -268,7 +259,7 @@ def _pull_directory(self,
continue
try:
- logger.debug(f"Saving {file.type} {file.path}")
+ logger.debug(f"Writing {file.type} {file.path} to disk")
self._save_serialized_file(file.raw_file_content, file.path, file.type)
successful_files.append(file.path)
except Exception as e:
@@ -318,7 +309,7 @@ def pull(self, path: str | None = None, environment: str | None = None) -> Tuple
successful_files, failed_files = self._pull_directory(None, environment)
else:
if self.is_file(path.strip()):
- logger.debug(f"Pulling specific file: {normalized_path}")
+ logger.debug(f"Pulling file: {normalized_path}")
if self._pull_file(normalized_path, environment):
successful_files = [path]
failed_files = []
From 058274a9798bc84073a771f92b8de545598bfc4e Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Sat, 10 May 2025 15:21:16 +0100
Subject: [PATCH 15/39] chore: bump version
---
pyproject.toml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/pyproject.toml b/pyproject.toml
index 6825e6fc..edebc9fd 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,7 +8,7 @@ authors = []
# The metadata here is used during development but not published to PyPI
[tool.poetry]
name = "humanloop"
-version = "0.8.36b2"
+version = "0.8.39"
description = "Humanloop Python SDK"
readme = "README.md"
authors = []
From 6151c8efd07ebd12bd7442f8d68a3f7eb3f5e654 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Sat, 10 May 2025 15:31:21 +0100
Subject: [PATCH 16/39] test(sync): add comprehensive unit test suite for
SyncClient
---
.../__init__.py => custom/README.md} | 0
.../integration => custom}/__init__.py | 0
.../assets/exact_match.py | 0
.../assets/levenshtein.py | 0
tests/{internal => custom}/conftest.py | 0
.../otel => custom/integration}/__init__.py | 0
.../integration/conftest.py | 0
.../integration/test_decorators.py | 0
.../integration/test_evals.py | 0
.../sync => custom/integration}/test_sync.py | 0
.../sync => custom/otel}/__init__.py | 0
.../{internal => custom}/otel/test_helpers.py | 0
tests/custom/sync/__init__.py | 0
tests/custom/sync/test_sync_client.py | 161 ++++++++++++++++++
14 files changed, 161 insertions(+)
rename tests/{internal/__init__.py => custom/README.md} (100%)
rename tests/{internal/integration => custom}/__init__.py (100%)
rename tests/{internal => custom}/assets/exact_match.py (100%)
rename tests/{internal => custom}/assets/levenshtein.py (100%)
rename tests/{internal => custom}/conftest.py (100%)
rename tests/{internal/otel => custom/integration}/__init__.py (100%)
rename tests/{internal => custom}/integration/conftest.py (100%)
rename tests/{internal => custom}/integration/test_decorators.py (100%)
rename tests/{internal => custom}/integration/test_evals.py (100%)
rename tests/{internal/sync => custom/integration}/test_sync.py (100%)
rename tests/{internal/sync => custom/otel}/__init__.py (100%)
rename tests/{internal => custom}/otel/test_helpers.py (100%)
create mode 100644 tests/custom/sync/__init__.py
create mode 100644 tests/custom/sync/test_sync_client.py
diff --git a/tests/internal/__init__.py b/tests/custom/README.md
similarity index 100%
rename from tests/internal/__init__.py
rename to tests/custom/README.md
diff --git a/tests/internal/integration/__init__.py b/tests/custom/__init__.py
similarity index 100%
rename from tests/internal/integration/__init__.py
rename to tests/custom/__init__.py
diff --git a/tests/internal/assets/exact_match.py b/tests/custom/assets/exact_match.py
similarity index 100%
rename from tests/internal/assets/exact_match.py
rename to tests/custom/assets/exact_match.py
diff --git a/tests/internal/assets/levenshtein.py b/tests/custom/assets/levenshtein.py
similarity index 100%
rename from tests/internal/assets/levenshtein.py
rename to tests/custom/assets/levenshtein.py
diff --git a/tests/internal/conftest.py b/tests/custom/conftest.py
similarity index 100%
rename from tests/internal/conftest.py
rename to tests/custom/conftest.py
diff --git a/tests/internal/otel/__init__.py b/tests/custom/integration/__init__.py
similarity index 100%
rename from tests/internal/otel/__init__.py
rename to tests/custom/integration/__init__.py
diff --git a/tests/internal/integration/conftest.py b/tests/custom/integration/conftest.py
similarity index 100%
rename from tests/internal/integration/conftest.py
rename to tests/custom/integration/conftest.py
diff --git a/tests/internal/integration/test_decorators.py b/tests/custom/integration/test_decorators.py
similarity index 100%
rename from tests/internal/integration/test_decorators.py
rename to tests/custom/integration/test_decorators.py
diff --git a/tests/internal/integration/test_evals.py b/tests/custom/integration/test_evals.py
similarity index 100%
rename from tests/internal/integration/test_evals.py
rename to tests/custom/integration/test_evals.py
diff --git a/tests/internal/sync/test_sync.py b/tests/custom/integration/test_sync.py
similarity index 100%
rename from tests/internal/sync/test_sync.py
rename to tests/custom/integration/test_sync.py
diff --git a/tests/internal/sync/__init__.py b/tests/custom/otel/__init__.py
similarity index 100%
rename from tests/internal/sync/__init__.py
rename to tests/custom/otel/__init__.py
diff --git a/tests/internal/otel/test_helpers.py b/tests/custom/otel/test_helpers.py
similarity index 100%
rename from tests/internal/otel/test_helpers.py
rename to tests/custom/otel/test_helpers.py
diff --git a/tests/custom/sync/__init__.py b/tests/custom/sync/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/custom/sync/test_sync_client.py b/tests/custom/sync/test_sync_client.py
new file mode 100644
index 00000000..29b2e712
--- /dev/null
+++ b/tests/custom/sync/test_sync_client.py
@@ -0,0 +1,161 @@
+import pytest
+from pathlib import Path
+from unittest.mock import Mock, patch
+from humanloop.sync.sync_client import SyncClient
+from humanloop.error import HumanloopRuntimeError
+from humanloop.types import FileType
+
+@pytest.fixture
+def mock_client():
+ return Mock()
+
+@pytest.fixture
+def sync_client(mock_client, tmp_path):
+ return SyncClient(
+ client=mock_client,
+ base_dir=str(tmp_path),
+ cache_size=10,
+ log_level=10 # DEBUG level for testing
+ )
+
+def test_init(sync_client, tmp_path):
+ """Test basic initialization of SyncClient."""
+ assert sync_client.base_dir == tmp_path
+ assert sync_client._cache_size == 10
+ assert sync_client.SERIALIZABLE_FILE_TYPES == ["prompt", "agent"]
+
+def test_normalize_path(sync_client):
+ """Test path normalization functionality."""
+ test_cases = [
+ ("path/to/file.prompt", "path/to/file"),
+ ("path\\to\\file.agent", "path/to/file"),
+ ("/leading/slashes/file.prompt", "leading/slashes/file"),
+ ("trailing/slashes/file.agent/", "trailing/slashes/file"),
+ ("multiple//slashes//file.prompt", "multiple/slashes/file"),
+ ]
+
+ for input_path, expected in test_cases:
+ assert sync_client._normalize_path(input_path) == expected
+
+def test_is_file(sync_client):
+ """Test file type detection."""
+ assert sync_client.is_file("test.prompt")
+ assert sync_client.is_file("test.agent")
+ assert not sync_client.is_file("test.txt")
+ assert not sync_client.is_file("test")
+
+def test_save_and_read_file(sync_client):
+ """Test saving and reading files."""
+ content = "test content"
+ path = "test/path"
+ file_type = "prompt"
+
+ # Test saving
+ sync_client._save_serialized_file(content, path, file_type)
+ saved_path = sync_client.base_dir / path
+ saved_path = saved_path.parent / f"{saved_path.stem}.{file_type}"
+ assert saved_path.exists()
+
+ # Test reading
+ read_content = sync_client.get_file_content(path, file_type)
+ assert read_content == content
+
+def test_pull_file(sync_client, mock_client):
+ """Test pulling a single file."""
+ mock_file = Mock()
+ mock_file.type = "prompt"
+ mock_file.path = "test/path"
+ mock_file.raw_file_content = "test content"
+
+ mock_client.files.retrieve_by_path.return_value = mock_file
+
+ success = sync_client._pull_file("test/path.prompt")
+ assert success
+ assert mock_client.files.retrieve_by_path.called
+
+ # Verify file was saved
+ saved_path = sync_client.base_dir / "test/path.prompt"
+ assert saved_path.exists()
+ assert saved_path.read_text() == "test content"
+
+def test_pull_directory(sync_client, mock_client):
+ """Test pulling multiple files from a directory."""
+ # Create mock responses for different pages
+ def mock_list_files(*args, **kwargs):
+ page = kwargs.get('page', 1)
+ mock_response = Mock()
+
+ if page == 1:
+ mock_response.records = [
+ Mock(
+ type="prompt",
+ path="test/path1",
+ raw_file_content="content1"
+ ),
+ Mock(
+ type="agent",
+ path="test/path2",
+ raw_file_content="content2"
+ )
+ ]
+ else:
+ # Return empty list for subsequent pages
+ mock_response.records = []
+
+ return mock_response
+
+ # Set up the mock to use our function
+ mock_client.files.list_files.side_effect = mock_list_files
+
+ successful, failed = sync_client._pull_directory("test")
+ assert len(successful) == 2
+ assert len(failed) == 0
+
+ # Verify files were saved
+ assert (sync_client.base_dir / "test/path1.prompt").exists()
+ assert (sync_client.base_dir / "test/path2.agent").exists()
+
+ # Verify the mock was called with correct parameters
+ mock_client.files.list_files.assert_any_call(
+ type=["prompt", "agent"],
+ page=1,
+ include_raw_file_content=True,
+ environment=None,
+ path="test"
+ )
+
+def test_error_handling(sync_client):
+ """Test error handling in various scenarios."""
+ # Test file not found
+ with pytest.raises(HumanloopRuntimeError, match="Local file not found"):
+ sync_client.get_file_content("nonexistent", "prompt")
+
+ # Test invalid file type
+ with pytest.raises(ValueError, match="Unsupported file type"):
+ sync_client._pull_file("test.txt")
+
+ # Test API error
+ with patch.object(sync_client.client.files, 'retrieve_by_path', side_effect=Exception("API Error")):
+ assert not sync_client._pull_file("test.prompt")
+
+def test_cache_functionality(sync_client):
+ """Test LRU cache functionality."""
+ # Save a test file
+ content = "test content"
+ path = "test/path"
+ file_type = "prompt"
+ sync_client._save_serialized_file(content, path, file_type)
+
+ # First read should hit disk
+ sync_client.get_file_content(path, file_type)
+
+ # Modify file on disk
+ saved_path = sync_client.base_dir / f"{path}.{file_type}"
+ saved_path.write_text("modified content")
+
+ # Second read should use cache
+ assert sync_client.get_file_content(path, file_type) == content
+
+ # Clear cache and verify new content is read
+ sync_client.clear_cache()
+ assert sync_client.get_file_content(path, file_type) == "modified content"
\ No newline at end of file
From f9e2744f66e33c87e1d7372219a4c4719a2864c0 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Sun, 11 May 2025 12:51:59 +0100
Subject: [PATCH 17/39] Rename tests dir from 'internal' to 'custom'
---
.fernignore | 2 +-
.gitignore | 1 +
tests/custom/README.md | 19 +++++++++++++++++++
tests/custom/integration/test_evals.py | 2 +-
4 files changed, 22 insertions(+), 2 deletions(-)
diff --git a/.fernignore b/.fernignore
index 176a2fc7..26eace82 100644
--- a/.fernignore
+++ b/.fernignore
@@ -18,7 +18,7 @@ src/humanloop/cli
## Tests
-tests/internal
+tests/custom
## CI
diff --git a/.gitignore b/.gitignore
index a55ede77..22418efe 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,5 +5,6 @@ poetry.toml
.ruff_cache/
.vscode
.env
+.env.test
tests/assets/*.jsonl
tests/assets/*.parquet
diff --git a/tests/custom/README.md b/tests/custom/README.md
index e69de29b..14ff7ed4 100644
--- a/tests/custom/README.md
+++ b/tests/custom/README.md
@@ -0,0 +1,19 @@
+# Custom Tests Directory
+
+This directory contains custom tests for the Humanloop Python SDK. While the main SDK is auto-generated using [Fern](https://buildwithfern.com/), this directory allows us to add our own test implementations that won't be overwritten during regeneration.
+
+## Why Custom Tests?
+
+- **Preservation**: Tests in this directory won't be overwritten when regenerating the SDK
+- **Custom Implementation**: Allows testing of our own implementations beyond the auto-generated code
+- **Integration**: Enables testing of how our custom code works with the auto-generated SDK
+
+## Running Tests
+
+```bash
+# Run all custom tests
+pytest tests/custom/
+
+# Run specific test file
+pytest tests/custom/sync/test_sync_client.py
+```
diff --git a/tests/custom/integration/test_evals.py b/tests/custom/integration/test_evals.py
index 49bbb6dc..be959649 100644
--- a/tests/custom/integration/test_evals.py
+++ b/tests/custom/integration/test_evals.py
@@ -4,7 +4,7 @@
import pytest
from humanloop.client import Humanloop
from humanloop.error import HumanloopRuntimeError
-from tests.integration.conftest import TestIdentifiers
+from tests.custom.integration.conftest import TestIdentifiers
def test_eval_run_works_on_online_files(
From 49b10784a6902cb5c85f668fd0b3be9b29cf585f Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Sun, 11 May 2025 22:46:21 +0100
Subject: [PATCH 18/39] fixed fixtures, some tests, linting
---
.gitignore | 1 +
src/humanloop/client.py | 32 ++--
src/humanloop/files/client.py | 3 +-
src/humanloop/files/raw_client.py | 2 +-
src/humanloop/overload.py | 34 ++--
src/humanloop/sync/sync_client.py | 153 +++++++++++-------
tests/custom/conftest.py | 129 +--------------
tests/custom/integration/conftest.py | 29 +++-
tests/custom/integration/test_sync.py | 146 +++++++++--------
.../{test_sync_client.py => test_client.py} | 76 +++++----
10 files changed, 274 insertions(+), 331 deletions(-)
rename tests/custom/sync/{test_sync_client.py => test_client.py} (84%)
diff --git a/.gitignore b/.gitignore
index 22418efe..cf60f9b1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,4 @@ poetry.toml
.env.test
tests/assets/*.jsonl
tests/assets/*.parquet
+humanloop
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index ee06b6a9..e9166229 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -142,11 +142,7 @@ def __init__(
)
self.use_local_files = use_local_files
- self._sync_client = SyncClient(
- client=self,
- base_dir=files_directory,
- cache_size=cache_size
- )
+ self._sync_client = SyncClient(client=self, base_dir=files_directory, cache_size=cache_size)
eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
eval_client.client = self
self.evaluations = eval_client
@@ -156,15 +152,15 @@ def __init__(
# and the @flow decorator providing the trace_id
self.prompts = overload_log(client=self.prompts)
self.prompts = overload_call(client=self.prompts)
- self.prompts = overload_with_local_files(
- client=self.prompts,
+ self.prompts = overload_with_local_files( # type: ignore [assignment]
+ client=self.prompts,
sync_client=self._sync_client,
- use_local_files=self.use_local_files
+ use_local_files=self.use_local_files,
)
- self.agents = overload_with_local_files(
- client=self.agents,
+ self.agents = overload_with_local_files( # type: ignore [assignment]
+ client=self.agents,
sync_client=self._sync_client,
- use_local_files=self.use_local_files
+ use_local_files=self.use_local_files,
)
self.flows = overload_log(client=self.flows)
self.tools = overload_log(client=self.tools)
@@ -387,10 +383,7 @@ def agent():
attributes=attributes,
)
- def pull(self,
- environment: str | None = None,
- path: str | None = None
- ) -> Tuple[List[str], List[str]]:
+ def pull(self, environment: str | None = None, path: str | None = None) -> Tuple[List[str], List[str]]:
"""Pull Prompt and Agent files from Humanloop to local filesystem.
This method will:
@@ -425,12 +418,9 @@ def pull(self,
If not provided, all Prompt and Agent files will be pulled.
:return: List of successfully processed file paths.
"""
- return self._sync_client.pull(
- environment=environment,
- path=path
- )
+ return self._sync_client.pull(environment=environment, path=path)
+
-
class AsyncHumanloop(AsyncBaseHumanloop):
"""
See docstring of AsyncBaseHumanloop.
@@ -438,4 +428,4 @@ class AsyncHumanloop(AsyncBaseHumanloop):
TODO: Add custom evaluation utilities for async case.
"""
- pass
\ No newline at end of file
+ pass
diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py
index b4fd4274..0f8c69fc 100644
--- a/src/humanloop/files/client.py
+++ b/src/humanloop/files/client.py
@@ -118,7 +118,8 @@ def list_files(
def retrieve_by_path(
self,
*,
- path: str,
+ # @TODO: @ale, can this be None?
+ path: str | None = None,
environment: typing.Optional[str] = None,
include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
diff --git a/src/humanloop/files/raw_client.py b/src/humanloop/files/raw_client.py
index 02f371ac..9b9bf17f 100644
--- a/src/humanloop/files/raw_client.py
+++ b/src/humanloop/files/raw_client.py
@@ -132,7 +132,7 @@ def list_files(
def retrieve_by_path(
self,
*,
- path: str,
+ path: str | None = None,
environment: typing.Optional[str] = None,
include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index 64638a65..5cbb813c 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -127,58 +127,60 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
client.call = types.MethodType(_overload_call, client) # type: ignore [assignment]
return client
+
def _get_file_type_from_client(client: Union[PromptsClient, AgentsClient]) -> FileType:
"""Get the file type based on the client type."""
- if isinstance(client, PromptsClient):
- return "prompt"
+ if isinstance(client, PromptsClient):
+ return "prompt"
elif isinstance(client, AgentsClient):
return "agent"
else:
raise ValueError(f"Unsupported client type: {type(client)}")
+
def overload_with_local_files(
- client: Union[PromptsClient, AgentsClient],
+ client: Union[PromptsClient, AgentsClient],
sync_client: SyncClient,
use_local_files: bool,
) -> Union[PromptsClient, AgentsClient]:
"""Overload call and log methods to handle local files when use_local_files is True.
-
+
When use_local_files is True, the following prioritization strategy is used:
1. Direct Parameters: If {file_type} parameters are provided directly (as a PromptKernelRequestParams or AgentKernelRequestParams object),
these take precedence and the local file is ignored.
2. Version/Environment: If version_id or environment is specified, the remote version is used instead
of the local file.
3. Local File: If neither of the above are specified, attempts to use the local file at the given path.
-
+
For example, with a prompt client:
- If prompt={model: "gpt-4", ...} is provided, uses those parameters directly
- If version_id="123" is provided, uses that remote version
- Otherwise, tries to load from the local file at the given path
-
+
Args:
client: The client to overload (PromptsClient or AgentsClient)
sync_client: The sync client used for file operations
use_local_files: Whether to enable local file handling
-
+
Returns:
The client with overloaded methods
-
+
Raises:
HumanloopRuntimeError: If use_local_files is True and local file cannot be accessed
"""
- original_call = client._call if hasattr(client, '_call') else client.call
- original_log = client._log if hasattr(client, '_log') else client.log
+ original_call = client._call if hasattr(client, "_call") else client.call
+ original_log = client._log if hasattr(client, "_log") else client.log
file_type = _get_file_type_from_client(client)
def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
- if "id" in kwargs and "path" in kwargs:
+ if "id" in kwargs and "path" in kwargs:
raise HumanloopRuntimeError(f"Can only specify one of `id` or `path` when {function_name}ing a {file_type}")
# Handle local files if enabled
if use_local_files and "path" in kwargs:
# Check if version_id or environment is specified
use_remote = any(["version_id" in kwargs, "environment" in kwargs])
normalized_path = sync_client._normalize_path(kwargs["path"])
-
+
if use_remote:
raise HumanloopRuntimeError(
f"Cannot use local file for `{normalized_path}` as version_id or environment was specified. "
@@ -196,7 +198,7 @@ def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
else:
file_content = sync_client.get_file_content(normalized_path, file_type)
kwargs[file_type] = file_content
- except (HumanloopRuntimeError) as e:
+ except HumanloopRuntimeError as e:
# Re-raise with more context
raise HumanloopRuntimeError(f"Failed to use local file for `{normalized_path}`: {str(e)}")
@@ -217,6 +219,6 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
def _overload_log(self, **kwargs) -> PromptCallResponse:
return _overload(self, "log", **kwargs)
- client.call = types.MethodType(_overload_call, client)
- client.log = types.MethodType(_overload_log, client)
- return client
\ No newline at end of file
+ client.call = types.MethodType(_overload_call, client) # type: ignore [assignment]
+ client.log = types.MethodType(_overload_log, client) # type: ignore [assignment]
+ return client
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index 4d6f9bdb..260eeaa4 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -2,6 +2,7 @@
from pathlib import Path
from typing import List, Tuple, TYPE_CHECKING
from functools import lru_cache
+import typing
from humanloop.types import FileType
import time
from humanloop.error import HumanloopRuntimeError
@@ -23,49 +24,54 @@
# Default cache size for file content caching
DEFAULT_CACHE_SIZE = 100
+
def format_api_error(error: Exception) -> str:
"""Format API error messages to be more user-friendly."""
error_msg = str(error)
if "status_code" not in error_msg or "body" not in error_msg:
return error_msg
-
+
try:
# Extract the body part and parse as JSON
body_str = error_msg.split("body: ")[1]
# Convert Python dict string to valid JSON by replacing single quotes with double quotes
body_str = body_str.replace("'", '"')
body = json.loads(body_str)
-
+
# Get the detail from the body
detail = body.get("detail", {})
-
+
# Prefer description, fall back to msg
return detail.get("description") or detail.get("msg") or error_msg
except Exception as e:
logger.debug(f"Failed to parse error message: {str(e)}")
return error_msg
+
+_SERIALIZABLE_FILE_TYPES_TYPE = typing.Literal["prompt", "agent"]
+
+
class SyncClient:
"""Client for managing synchronization between local filesystem and Humanloop.
-
+
This client provides file synchronization between Humanloop and the local filesystem,
- with built-in caching for improved performance. The cache uses Python's LRU (Least
- Recently Used) cache to automatically manage memory usage by removing least recently
+ with built-in caching for improved performance. The cache uses Python's LRU (Least
+ Recently Used) cache to automatically manage memory usage by removing least recently
accessed files when the cache is full.
-
+
The cache is automatically updated when files are pulled or saved, and can be
manually cleared using the clear_cache() method.
"""
-
+
# File types that can be serialized to/from the filesystem
SERIALIZABLE_FILE_TYPES = ["prompt", "agent"]
-
+
def __init__(
- self,
+ self,
client: "BaseHumanloop",
base_dir: str = "humanloop",
cache_size: int = DEFAULT_CACHE_SIZE,
- log_level: int = logging.WARNING
+ log_level: int = logging.WARNING,
):
"""
Parameters
@@ -82,20 +88,23 @@ def __init__(
logger.setLevel(log_level)
# Create a new cached version of get_file_content with the specified cache size
- self.get_file_content = lru_cache(maxsize=cache_size)(self._get_file_content_impl)
+ # @TODO: @ale, maybe move the cache to the client?
+ self.get_file_content = lru_cache(maxsize=cache_size)( # type: ignore [assignment]
+ self._get_file_content_implementation,
+ )
- def _get_file_content_impl(self, path: str, file_type: FileType) -> str:
+ def _get_file_content_implementation(self, path: str, file_type: FileType) -> str:
"""Implementation of get_file_content without the cache decorator.
-
+
This is the actual implementation that gets wrapped by lru_cache.
-
+
Args:
path: The normalized path to the file (without extension)
file_type: The type of file (Prompt or Agent)
-
+
Returns:
The raw file content
-
+
Raises:
HumanloopRuntimeError: In two cases:
1. If the file doesn't exist at the expected location
@@ -106,10 +115,10 @@ def _get_file_content_impl(self, path: str, file_type: FileType) -> str:
local_path = self.base_dir / path
# Add appropriate extension
local_path = local_path.parent / f"{local_path.stem}.{file_type}"
-
+
if not local_path.exists():
raise HumanloopRuntimeError(f"Local file not found: {local_path}")
-
+
try:
# Read the raw file content
with open(local_path) as f:
@@ -121,25 +130,26 @@ def _get_file_content_impl(self, path: str, file_type: FileType) -> str:
def get_file_content(self, path: str, file_type: FileType) -> str:
"""Get the raw file content of a file from cache or filesystem.
-
+
This method uses an LRU cache to store file contents. When the cache is full,
the least recently accessed files are automatically removed to make space.
-
+
Args:
path: The normalized path to the file (without extension)
file_type: The type of file (Prompt or Agent)
-
+
Returns:
The raw file content
-
+
Raises:
HumanloopRuntimeError: If the file doesn't exist or can't be read
"""
- return self._get_file_content_impl(path, file_type)
+ return self._get_file_content_implementation(path, file_type)
def clear_cache(self) -> None:
"""Clear the LRU cache."""
- self.get_file_content.cache_clear()
+ # @TODO: @ale, why not put the cache on the client? This is a bit of a hack.
+ self.get_file_content.cache_clear() # type: ignore [attr-defined]
def _normalize_path(self, path: str) -> str:
"""Normalize the path by:
@@ -149,17 +159,22 @@ def _normalize_path(self, path: str) -> str:
"""
# Convert to Path object to handle platform-specific separators
path_obj = Path(path)
-
+
# Remove extension, convert to string with forward slashes, and remove leading/trailing slashes
- normalized = str(path_obj.with_suffix(''))
+ normalized = str(path_obj.with_suffix(""))
# Replace all backslashes and normalize multiple forward slashes
- return '/'.join(part for part in normalized.replace('\\', '/').split('/') if part)
+ return "/".join(part for part in normalized.replace("\\", "/").split("/") if part)
def is_file(self, path: str) -> bool:
"""Check if the path is a file by checking for .prompt or .agent extension."""
- return path.endswith('.prompt') or path.endswith('.agent')
-
- def _save_serialized_file(self, serialized_content: str, file_path: str, file_type: FileType) -> None:
+ return path.endswith(".prompt") or path.endswith(".agent")
+
+ def _save_serialized_file(
+ self,
+ serialized_content: str,
+ file_path: str,
+ file_type: typing.Literal["prompt", "agent"],
+ ) -> None:
"""Save serialized file to local filesystem."""
try:
# Create full path including base_dir prefix
@@ -173,28 +188,32 @@ def _save_serialized_file(self, serialized_content: str, file_path: str, file_ty
# Write raw file content to file
with open(new_path, "w") as f:
f.write(serialized_content)
-
+
# Clear the cache when a file is saved
self.clear_cache()
except Exception as e:
logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}")
raise
- def _pull_file(self, path: str, environment: str | None = None) -> bool:
+ def _pull_file(
+ self,
+ path: str | None = None,
+ environment: str | None = None,
+ ) -> bool:
"""Pull a specific file from Humanloop to local filesystem.
-
+
Returns:
True if the file was successfully pulled, False otherwise
-
+
Raises:
HumanloopRuntimeError: If there's an error communicating with the API
"""
try:
file = self.client.files.retrieve_by_path(
- path=path,
+ path=path,
environment=environment,
- include_raw_file_content=True
- )
+ include_raw_file_content=True,
+ )
except Exception as e:
logger.error(f"Failed to pull file {path}: {format_api_error(e)}")
return False
@@ -202,24 +221,34 @@ def _pull_file(self, path: str, environment: str | None = None) -> bool:
if file.type not in self.SERIALIZABLE_FILE_TYPES:
raise ValueError(f"Unsupported file type: {file.type}")
+ file_type: _SERIALIZABLE_FILE_TYPES_TYPE = typing.cast(
+ _SERIALIZABLE_FILE_TYPES_TYPE,
+ file.type,
+ )
+
try:
- self._save_serialized_file(file.raw_file_content, file.path, file.type)
+ self._save_serialized_file(
+ serialized_content=file.raw_file_content, # type: ignore [union-attr, arg-type]
+ file_path=file.path,
+ file_type=file_type,
+ )
return True
except Exception as e:
logger.error(f"Failed to save file {path}: {str(e)}")
return False
- def _pull_directory(self,
- path: str | None = None,
- environment: str | None = None,
- ) -> Tuple[List[str], List[str]]:
+ def _pull_directory(
+ self,
+ path: str | None = None,
+ environment: str | None = None,
+ ) -> Tuple[List[str], List[str]]:
"""Sync Prompt and Agent files from Humanloop to local filesystem.
-
+
Returns:
Tuple of two lists:
- First list contains paths of successfully synced files
- Second list contains paths of files that failed to sync
-
+
Raises:
HumanloopRuntimeError: If there's an error communicating with the API
"""
@@ -233,11 +262,11 @@ def _pull_directory(self,
try:
logger.debug(f"`{path}`: Requesting page {page} of files")
response = self.client.files.list_files(
- type=["prompt", "agent"],
+ type=["prompt", "agent"],
page=page,
include_raw_file_content=True,
environment=environment,
- path=path
+ path=path,
)
if len(response.records) == 0:
@@ -253,14 +282,24 @@ def _pull_directory(self,
logger.warning(f"Skipping unsupported file type: {file.type}")
continue
+ file_type: _SERIALIZABLE_FILE_TYPES_TYPE = typing.cast(
+ _SERIALIZABLE_FILE_TYPES_TYPE,
+ file.type,
+ )
+
# Skip if no raw file content
- if not getattr(file, "raw_file_content", None):
+ # @TODO: @ale, inconsistent, other places we are throwing if file is not serialisable
+            if not getattr(file, "raw_file_content", None):
logger.warning(f"No content found for {file.type} {getattr(file, 'id', '')}")
continue
try:
logger.debug(f"Writing {file.type} {file.path} to disk")
- self._save_serialized_file(file.raw_file_content, file.path, file.type)
+ self._save_serialized_file(
+ serialized_content=file.raw_file_content, # type: ignore [union-attr]
+ file_path=file.path,
+ file_type=file_type,
+ )
successful_files.append(file.path)
except Exception as e:
failed_files.append(file.path)
@@ -293,24 +332,29 @@ def pull(self, path: str | None = None, environment: str | None = None) -> Tuple
Tuple of two lists:
- First list contains paths of successfully synced files
- Second list contains paths of files that failed to sync
-
+
Raises:
HumanloopRuntimeError: If there's an error communicating with the API
"""
start_time = time.time()
normalized_path = self._normalize_path(path) if path else None
- logger.info(f"Starting pull operation: path={normalized_path or '(root)'}, environment={environment or '(default)'}")
+ logger.info(
+ f"Starting pull operation: path={normalized_path or '(root)'}, environment={environment or '(default)'}"
+ )
try:
if path is None:
# Pull all files from the root
logger.debug("Pulling all files from root")
- successful_files, failed_files = self._pull_directory(None, environment)
+ successful_files, failed_files = self._pull_directory(
+ path=None,
+ environment=environment,
+ )
else:
if self.is_file(path.strip()):
logger.debug(f"Pulling file: {normalized_path}")
- if self._pull_file(normalized_path, environment):
+ if self._pull_file(path=normalized_path, environment=environment):
successful_files = [path]
failed_files = []
else:
@@ -322,8 +366,7 @@ def pull(self, path: str | None = None, environment: str | None = None) -> Tuple
duration_ms = int((time.time() - start_time) * 1000)
logger.info(f"Pull completed in {duration_ms}ms: {len(successful_files)} files succeeded")
-
+
return successful_files, failed_files
except Exception as e:
raise HumanloopRuntimeError(f"Pull operation failed: {str(e)}")
-
diff --git a/tests/custom/conftest.py b/tests/custom/conftest.py
index a8c78ac5..c1f64bbf 100644
--- a/tests/custom/conftest.py
+++ b/tests/custom/conftest.py
@@ -1,16 +1,9 @@
-from dataclasses import asdict, dataclass
-import os
-import random
-import string
-import time
-from typing import Callable, Generator
+from typing import Generator
import typing
from unittest.mock import MagicMock
-from dotenv import load_dotenv
import pytest
from humanloop.base_client import BaseHumanloop
-from humanloop.client import Humanloop
from humanloop.otel.exporter import HumanloopSpanExporter
from humanloop.otel.processor import HumanloopSpanProcessor
from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
@@ -162,123 +155,3 @@ def call_llm_messages() -> list[ChatCompletionMessageParam]:
"content": "Bonjour!",
},
]
-
-
-@dataclass
-class APIKeys:
- openai: str
- humanloop: str
-
-
-@pytest.fixture(scope="session")
-def api_keys() -> APIKeys:
- openai_key = os.getenv("OPENAI_API_KEY")
- humanloop_key = os.getenv("HUMANLOOP_API_KEY")
- for key_name, key_value in [
- ("OPENAI_API_KEY", openai_key),
- ("HUMANLOOP_API_KEY", humanloop_key),
- ]:
- if key_value is None:
- raise ValueError(f"{key_name} is not set in .env file")
- api_keys = APIKeys(
- openai=openai_key, # type: ignore [arg-type]
- humanloop=humanloop_key, # type: ignore [arg-type]
- )
- for key, value in asdict(api_keys).items():
- if value is None:
- raise ValueError(f"{key.upper()} key is not set in .env file")
- return api_keys
-
-
-@pytest.fixture(scope="session")
-def humanloop_client(request, api_keys: APIKeys) -> Humanloop:
- """Create a Humanloop client for testing."""
- use_local_files = getattr(request, "param", False)
- return Humanloop(
- api_key=api_keys.humanloop,
- base_url="http://localhost:80/v5",
- use_local_files=use_local_files
- )
-
-
-@pytest.fixture(scope="session", autouse=True)
-def load_env():
- load_dotenv()
-
-
-def directory_cleanup(directory_id: str, humanloop_client: Humanloop):
- response = humanloop_client.directories.get(directory_id)
- for file in response.files:
- file_id = file.id
- if file.type == "prompt":
- client = humanloop_client.prompts # type: ignore [assignment]
- elif file.type == "tool":
- client = humanloop_client.tools # type: ignore [assignment]
- elif file.type == "dataset":
- client = humanloop_client.datasets # type: ignore [assignment]
- elif file.type == "evaluator":
- client = humanloop_client.evaluators # type: ignore [assignment]
- elif file.type == "flow":
- client = humanloop_client.flows # type: ignore [assignment]
- elif file.type == "agent":
- client = humanloop_client.agents # type: ignore [assignment]
- client.delete(file_id)
-
- for subdirectory in response.subdirectories:
- directory_cleanup(
- directory_id=subdirectory.id,
- humanloop_client=humanloop_client,
- )
-
- humanloop_client.directories.delete(id=response.id)
-
-
-@dataclass
-class DirectoryIdentifiers:
- path: str
- id: str
-
-
-@pytest.fixture()
-def test_directory(
- humanloop_client: Humanloop,
-) -> Generator[DirectoryIdentifiers, None, None]:
- # Generate a random alphanumeric directory name to avoid conflicts
- def get_random_string(length: int = 16) -> str:
- return "".join([random.choice(string.ascii_letters + "0123456789") for _ in range(length)])
-
- directory_path = "SDK_integ_test_" + get_random_string()
- response = humanloop_client.directories.create(path=directory_path)
- assert response.path == directory_path
- try:
- yield DirectoryIdentifiers(
- path=response.path,
- id=response.id,
- )
- finally:
- time.sleep(1)
- directory_cleanup(response.id, humanloop_client)
-
-
-@pytest.fixture()
-def get_test_path(test_directory: DirectoryIdentifiers) -> Callable[[str], str]:
- def generate_path(name: str) -> str:
- return f"{test_directory.path}/{name}"
-
- return generate_path
-
-
-# @pytest.fixture(scope="session", autouse=True)
-# def cleanup_test_dirs(humanloop_client: Humanloop):
-# def _cleanup_all_test_dirs():
-# dirs = humanloop_client.directories.list()
-# for dir in dirs:
-# if dir.path.startswith("SDK_integ_test_"):
-# directory_cleanup(
-# directory_id=dir.id,
-# humanloop_client=humanloop_client,
-# )
-
-# _cleanup_all_test_dirs()
-# yield
-# _cleanup_all_test_dirs()
diff --git a/tests/custom/integration/conftest.py b/tests/custom/integration/conftest.py
index 72611b5d..41e881c9 100644
--- a/tests/custom/integration/conftest.py
+++ b/tests/custom/integration/conftest.py
@@ -1,6 +1,7 @@
from contextlib import contextmanager, redirect_stdout
from dataclasses import dataclass
import os
+import time
from typing import Any, ContextManager, Generator
import io
from typing import TextIO
@@ -40,18 +41,42 @@ def humanloop_test_client() -> Humanloop:
dotenv.load_dotenv()
if not os.getenv("HUMANLOOP_API_KEY"):
pytest.fail("HUMANLOOP_API_KEY is not set for integration tests")
- return Humanloop(api_key=os.getenv("HUMANLOOP_API_KEY")) # type: ignore [return-value]
+ return Humanloop(api_key=os.getenv("HUMANLOOP_API_KEY"))
@pytest.fixture(scope="function")
def sdk_test_dir(humanloop_test_client: Humanloop) -> Generator[str, None, None]:
+ def cleanup_directory(directory_id: str):
+ directory_response = humanloop_test_client.directories.get(id=directory_id)
+ for subdirectory in directory_response.subdirectories:
+ cleanup_directory(subdirectory.id)
+ for file in directory_response.files:
+            # Map file types to their delete-capable sub-clients; "tool" was
+            # handled by the old conftest cleanup and must still be deletable.
+            clients = {
+                "prompt": humanloop_test_client.prompts,
+                "agent": humanloop_test_client.agents,
+                "tool": humanloop_test_client.tools,
+                "dataset": humanloop_test_client.datasets,
+                "evaluator": humanloop_test_client.evaluators,
+                "flow": humanloop_test_client.flows,
+            }
+            if file.type not in clients:
+                raise ValueError(f"Unknown file type: {file.type}")
+            clients[file.type].delete(id=file.id)
+ humanloop_test_client.directories.delete(id=directory_response.id)
+
path = f"SDK_INTEGRATION_TEST_{uuid.uuid4()}"
+ response = None
try:
response = humanloop_test_client.directories.create(path=path)
yield response.path
- humanloop_test_client.directories.delete(id=response.id)
except Exception as e:
pytest.fail(f"Failed to create directory {path}: {e}")
+ finally:
+ if response:
+ time.sleep(5)
+ cleanup_directory(response.id)
@pytest.fixture(scope="function")
diff --git a/tests/custom/integration/test_sync.py b/tests/custom/integration/test_sync.py
index a2441b4b..348bcb7e 100644
--- a/tests/custom/integration/test_sync.py
+++ b/tests/custom/integration/test_sync.py
@@ -1,4 +1,4 @@
-from typing import List, NamedTuple, Union
+from typing import Generator, List, NamedTuple, Union
from pathlib import Path
import pytest
from humanloop import Humanloop, FileType, AgentResponse, PromptResponse
@@ -14,7 +14,9 @@ class SyncableFile(NamedTuple):
@pytest.fixture
-def test_file_structure(humanloop_client: Humanloop, get_test_path) -> List[SyncableFile]:
+def test_file_structure(
+ humanloop_test_client: Humanloop, sdk_test_dir: str
+) -> Generator[list[SyncableFile], None, None]:
"""Creates a predefined structure of files in Humanloop for testing sync"""
files: List[SyncableFile] = [
SyncableFile(
@@ -47,27 +49,25 @@ def test_file_structure(humanloop_client: Humanloop, get_test_path) -> List[Sync
# Create the files in Humanloop
created_files = []
for file in files:
- full_path = get_test_path(file.path)
+ full_path = f"{sdk_test_dir}/{file.path}"
response: Union[AgentResponse, PromptResponse]
if file.type == "prompt":
- response = humanloop_client.prompts.upsert(
+ response = humanloop_test_client.prompts.upsert(
path=full_path,
model=file.model,
)
elif file.type == "agent":
- response = humanloop_client.agents.upsert(
+ response = humanloop_test_client.agents.upsert(
path=full_path,
model=file.model,
)
- created_files.append(SyncableFile(
- path=full_path,
- type=file.type,
- model=file.model,
- id=response.id,
- version_id=response.version_id
- ))
+ created_files.append(
+ SyncableFile(
+ path=full_path, type=file.type, model=file.model, id=response.id, version_id=response.version_id
+ )
+ )
- return created_files
+ yield created_files
@pytest.fixture
@@ -82,10 +82,10 @@ def cleanup_local_files():
shutil.rmtree(local_dir)
-def test_pull_basic(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
+def test_pull_basic(humanloop_test_client: Humanloop, test_file_structure: List[SyncableFile]):
"""Test that humanloop.sync() correctly syncs remote files to local filesystem"""
# Run the sync
- successful_files = humanloop_client.pull()
+ successful_files = humanloop_test_client.pull()
# Verify each file was synced correctly
for file in test_file_structure:
@@ -103,115 +103,129 @@ def test_pull_basic(humanloop_client: Humanloop, test_file_structure: List[Synca
content = local_path.read_text()
assert content, f"File at {local_path} should not be empty"
-@pytest.mark.parametrize("humanloop_client", [True], indirect=True)
-def test_overload_with_local_files(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
+
+def test_overload_with_local_files(
+ humanloop_test_client: Humanloop,
+ test_file_structure: List[SyncableFile],
+):
"""Test that overload_with_local_files correctly handles local files.
-
+
Flow:
1. Create files in remote (via test_file_structure fixture)
2. Pull files locally
3. Test using the pulled files
"""
# First pull the files locally
- humanloop_client.pull()
+ humanloop_test_client.pull()
# Test using the pulled files
test_file = test_file_structure[0] # Use the first test file
extension = f".{test_file.type}"
local_path = Path("humanloop") / f"{test_file.path}{extension}"
-
+
# Verify the file was pulled correctly
assert local_path.exists(), f"Expected pulled file at {local_path}"
assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
# Test call with pulled file
+ response: Union[AgentResponse, PromptResponse]
if test_file.type == "prompt":
- response = humanloop_client.prompts.call(path=test_file.path, messages=[{"role": "user", "content": "Testing"}])
+ response = humanloop_test_client.prompts.call( # type: ignore [assignment]
+ path=test_file.path, messages=[{"role": "user", "content": "Testing"}]
+ )
assert response is not None
elif test_file.type == "agent":
- response = humanloop_client.agents.call(path=test_file.path, messages=[{"role": "user", "content": "Testing"}])
+ response = humanloop_test_client.agents.call( # type: ignore [assignment]
+ path=test_file.path, messages=[{"role": "user", "content": "Testing"}]
+ )
assert response is not None
# Test with invalid path
with pytest.raises(HumanloopRuntimeError):
- if test_file.type == "prompt":
- humanloop_client.prompts.call(path="invalid/path")
- elif test_file.type == "agent":
- humanloop_client.agents.call(path="invalid/path")
+        match test_file.type:
+            case "prompt":
+                sub_client = humanloop_test_client.prompts
+            case "agent":
+                sub_client = humanloop_test_client.agents
+            case _:
+                raise ValueError(f"Invalid file type: {test_file.type}")
+        sub_client.call(path="invalid/path")
-@pytest.mark.parametrize("humanloop_client", [True], indirect=True)
-def test_overload_log_with_local_files(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
+
+def test_overload_log_with_local_files(
+ humanloop_test_client: Humanloop,
+ test_file_structure: List[SyncableFile],
+ sdk_test_dir: str,
+):
"""Test that overload_with_local_files correctly handles local files for log operations.
-
+
Flow:
1. Create files in remote (via test_file_structure fixture)
2. Pull files locally
3. Test logging using the pulled files
- :param humanloop_client: The Humanloop client with local files enabled
+ :param humanloop_test_client: The Humanloop client with local files enabled
:param test_file_structure: List of test files created in remote
:param cleanup_local_files: Fixture to clean up local files after test
"""
# First pull the files locally
- humanloop_client.pull()
+ humanloop_test_client.pull()
# Test using the pulled files
test_file = test_file_structure[0] # Use the first test file
extension = f".{test_file.type}"
local_path = Path("humanloop") / f"{test_file.path}{extension}"
-
+
# Verify the file was pulled correctly
assert local_path.exists(), f"Expected pulled file at {local_path}"
assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
# Test log with pulled file
if test_file.type == "prompt":
- response = humanloop_client.prompts.log(
- path=test_file.path,
- messages=[{"role": "user", "content": "Testing"}],
- output="Test response"
+ response = humanloop_test_client.prompts.log( # type: ignore [assignment]
+ path=test_file.path, messages=[{"role": "user", "content": "Testing"}], output="Test response"
)
assert response is not None
elif test_file.type == "agent":
- response = humanloop_client.agents.log(
- path=test_file.path,
- messages=[{"role": "user", "content": "Testing"}],
- output="Test response"
+ response = humanloop_test_client.agents.log( # type: ignore [assignment]
+ path=test_file.path, messages=[{"role": "user", "content": "Testing"}], output="Test response"
)
assert response is not None
# Test with invalid path
with pytest.raises(HumanloopRuntimeError):
if test_file.type == "prompt":
- humanloop_client.prompts.log(
- path="invalid/path",
+ humanloop_test_client.prompts.log(
+ path=f"{sdk_test_dir}/invalid/path",
messages=[{"role": "user", "content": "Testing"}],
- output="Test response"
+ output="Test response",
)
elif test_file.type == "agent":
- humanloop_client.agents.log(
- path="invalid/path",
+ humanloop_test_client.agents.log(
+ path=f"{sdk_test_dir}/invalid/path",
messages=[{"role": "user", "content": "Testing"}],
- output="Test response"
+ output="Test response",
)
-@pytest.mark.parametrize("humanloop_client", [True], indirect=True)
-def test_overload_version_environment_handling(humanloop_client: Humanloop, test_file_structure: List[SyncableFile], cleanup_local_files):
+
+def test_overload_version_environment_handling(
+ humanloop_test_client: Humanloop, test_file_structure: List[SyncableFile]
+):
"""Test that overload_with_local_files correctly handles version_id and environment parameters.
-
+
Flow:
1. Create files in remote (via test_file_structure fixture)
2. Pull files locally
3. Test that version_id/environment parameters cause remote usage with warning
"""
# First pull the files locally
- humanloop_client.pull()
+ humanloop_test_client.pull()
# Test using the pulled files
test_file = test_file_structure[0] # Use the first test file
extension = f".{test_file.type}"
local_path = Path("humanloop") / f"{test_file.path}{extension}"
-
+
# Verify the file was pulled correctly
assert local_path.exists(), f"Expected pulled file at {local_path}"
assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
@@ -219,49 +233,45 @@ def test_overload_version_environment_handling(humanloop_client: Humanloop, test
# Test with version_id - should use remote with warning
with pytest.warns(UserWarning, match="Ignoring local file.*as version_id or environment was specified"):
if test_file.type == "prompt":
- response = humanloop_client.prompts.call(
+ response = humanloop_test_client.prompts.call( # type: ignore [assignment]
path=test_file.path,
version_id=test_file.version_id,
- messages=[{"role": "user", "content": "Testing"}]
+ messages=[{"role": "user", "content": "Testing"}],
)
elif test_file.type == "agent":
- response = humanloop_client.agents.call(
+ response = humanloop_test_client.agents.call( # type: ignore [assignment]
path=test_file.path,
version_id=test_file.version_id,
- messages=[{"role": "user", "content": "Testing"}]
+ messages=[{"role": "user", "content": "Testing"}],
)
assert response is not None
# Test with environment - should use remote with warning
with pytest.warns(UserWarning, match="Ignoring local file.*as version_id or environment was specified"):
if test_file.type == "prompt":
- response = humanloop_client.prompts.call(
- path=test_file.path,
- environment="production",
- messages=[{"role": "user", "content": "Testing"}]
+ response = humanloop_test_client.prompts.call( # type: ignore [assignment]
+ path=test_file.path, environment="production", messages=[{"role": "user", "content": "Testing"}]
)
elif test_file.type == "agent":
- response = humanloop_client.agents.call(
- path=test_file.path,
- environment="production",
- messages=[{"role": "user", "content": "Testing"}]
+ response = humanloop_test_client.agents.call( # type: ignore [assignment]
+ path=test_file.path, environment="production", messages=[{"role": "user", "content": "Testing"}]
)
assert response is not None
# Test with both version_id and environment - should use remote with warning
with pytest.warns(UserWarning, match="Ignoring local file.*as version_id or environment was specified"):
if test_file.type == "prompt":
- response = humanloop_client.prompts.call(
+ response = humanloop_test_client.prompts.call( # type: ignore [assignment]
path=test_file.path,
version_id=test_file.version_id,
environment="staging",
- messages=[{"role": "user", "content": "Testing"}]
+ messages=[{"role": "user", "content": "Testing"}],
)
elif test_file.type == "agent":
- response = humanloop_client.agents.call(
+ response = humanloop_test_client.agents.call( # type: ignore [assignment]
path=test_file.path,
version_id=test_file.version_id,
environment="staging",
- messages=[{"role": "user", "content": "Testing"}]
+ messages=[{"role": "user", "content": "Testing"}],
)
- assert response is not None
\ No newline at end of file
+ assert response is not None
diff --git a/tests/custom/sync/test_sync_client.py b/tests/custom/sync/test_client.py
similarity index 84%
rename from tests/custom/sync/test_sync_client.py
rename to tests/custom/sync/test_client.py
index 29b2e712..79168223 100644
--- a/tests/custom/sync/test_sync_client.py
+++ b/tests/custom/sync/test_client.py
@@ -1,29 +1,32 @@
+import logging
import pytest
-from pathlib import Path
from unittest.mock import Mock, patch
from humanloop.sync.sync_client import SyncClient
from humanloop.error import HumanloopRuntimeError
-from humanloop.types import FileType
+
@pytest.fixture
def mock_client():
return Mock()
+
@pytest.fixture
def sync_client(mock_client, tmp_path):
return SyncClient(
client=mock_client,
base_dir=str(tmp_path),
cache_size=10,
- log_level=10 # DEBUG level for testing
+ log_level=logging.DEBUG, # DEBUG level for testing # noqa: F821
)
+
def test_init(sync_client, tmp_path):
"""Test basic initialization of SyncClient."""
assert sync_client.base_dir == tmp_path
assert sync_client._cache_size == 10
assert sync_client.SERIALIZABLE_FILE_TYPES == ["prompt", "agent"]
+
def test_normalize_path(sync_client):
"""Test path normalization functionality."""
test_cases = [
@@ -33,10 +36,11 @@ def test_normalize_path(sync_client):
("trailing/slashes/file.agent/", "trailing/slashes/file"),
("multiple//slashes//file.prompt", "multiple/slashes/file"),
]
-
+
for input_path, expected in test_cases:
assert sync_client._normalize_path(input_path) == expected
+
def test_is_file(sync_client):
"""Test file type detection."""
assert sync_client.is_file("test.prompt")
@@ -44,100 +48,94 @@ def test_is_file(sync_client):
assert not sync_client.is_file("test.txt")
assert not sync_client.is_file("test")
+
def test_save_and_read_file(sync_client):
"""Test saving and reading files."""
content = "test content"
path = "test/path"
file_type = "prompt"
-
+
# Test saving
sync_client._save_serialized_file(content, path, file_type)
saved_path = sync_client.base_dir / path
saved_path = saved_path.parent / f"{saved_path.stem}.{file_type}"
assert saved_path.exists()
-
+
# Test reading
read_content = sync_client.get_file_content(path, file_type)
assert read_content == content
+
def test_pull_file(sync_client, mock_client):
"""Test pulling a single file."""
mock_file = Mock()
mock_file.type = "prompt"
mock_file.path = "test/path"
mock_file.raw_file_content = "test content"
-
+
mock_client.files.retrieve_by_path.return_value = mock_file
-
+
success = sync_client._pull_file("test/path.prompt")
assert success
assert mock_client.files.retrieve_by_path.called
-
+
# Verify file was saved
saved_path = sync_client.base_dir / "test/path.prompt"
assert saved_path.exists()
assert saved_path.read_text() == "test content"
+
def test_pull_directory(sync_client, mock_client):
"""Test pulling multiple files from a directory."""
+
# Create mock responses for different pages
def mock_list_files(*args, **kwargs):
- page = kwargs.get('page', 1)
+ page = kwargs.get("page", 1)
mock_response = Mock()
-
+
if page == 1:
mock_response.records = [
- Mock(
- type="prompt",
- path="test/path1",
- raw_file_content="content1"
- ),
- Mock(
- type="agent",
- path="test/path2",
- raw_file_content="content2"
- )
+ Mock(type="prompt", path="test/path1", raw_file_content="content1"),
+ Mock(type="agent", path="test/path2", raw_file_content="content2"),
]
else:
# Return empty list for subsequent pages
mock_response.records = []
-
+
return mock_response
-
+
# Set up the mock to use our function
mock_client.files.list_files.side_effect = mock_list_files
-
+
successful, failed = sync_client._pull_directory("test")
assert len(successful) == 2
assert len(failed) == 0
-
+
# Verify files were saved
assert (sync_client.base_dir / "test/path1.prompt").exists()
assert (sync_client.base_dir / "test/path2.agent").exists()
-
+
# Verify the mock was called with correct parameters
mock_client.files.list_files.assert_any_call(
- type=["prompt", "agent"],
- page=1,
- include_raw_file_content=True,
- environment=None,
- path="test"
+ type=["prompt", "agent"], page=1, include_raw_file_content=True, environment=None, path="test"
)
+
def test_error_handling(sync_client):
"""Test error handling in various scenarios."""
# Test file not found
with pytest.raises(HumanloopRuntimeError, match="Local file not found"):
sync_client.get_file_content("nonexistent", "prompt")
-
+
# Test invalid file type
with pytest.raises(ValueError, match="Unsupported file type"):
sync_client._pull_file("test.txt")
-
+
# Test API error
- with patch.object(sync_client.client.files, 'retrieve_by_path', side_effect=Exception("API Error")):
+ with patch.object(sync_client.client.files, "retrieve_by_path", side_effect=Exception("API Error")):
assert not sync_client._pull_file("test.prompt")
+
def test_cache_functionality(sync_client):
"""Test LRU cache functionality."""
# Save a test file
@@ -145,17 +143,17 @@ def test_cache_functionality(sync_client):
path = "test/path"
file_type = "prompt"
sync_client._save_serialized_file(content, path, file_type)
-
+
# First read should hit disk
sync_client.get_file_content(path, file_type)
-
+
# Modify file on disk
saved_path = sync_client.base_dir / f"{path}.{file_type}"
saved_path.write_text("modified content")
-
+
# Second read should use cache
assert sync_client.get_file_content(path, file_type) == content
-
+
# Clear cache and verify new content is read
sync_client.clear_cache()
- assert sync_client.get_file_content(path, file_type) == "modified content"
\ No newline at end of file
+ assert sync_client.get_file_content(path, file_type) == "modified content"
From feedab7adfda5391faa58b50458c3d8504584c5b Mon Sep 17 00:00:00 2001
From: Andrei Bratu
Date: Mon, 12 May 2025 09:05:30 +0100
Subject: [PATCH 19/39] fix more tests
---
.fernignore | 1 +
poetry.lock | 40 +++++++++++++++++++-
pyproject.toml | 1 +
pytest.ini | 2 +
src/humanloop/overload.py | 42 ++++++++++++++++-----
tests/custom/integration/conftest.py | 5 ++-
tests/custom/integration/test_decorators.py | 1 -
7 files changed, 79 insertions(+), 13 deletions(-)
create mode 100644 pytest.ini
diff --git a/.fernignore b/.fernignore
index 26eace82..fd7adc81 100644
--- a/.fernignore
+++ b/.fernignore
@@ -15,6 +15,7 @@ src/humanloop/decorators
src/humanloop/otel
src/humanloop/sync
src/humanloop/cli
+pytest.ini
## Tests
diff --git a/poetry.lock b/poetry.lock
index 29a15f63..b105f29f 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -310,6 +310,21 @@ files = [
[package.extras]
test = ["pytest (>=6)"]
+[[package]]
+name = "execnet"
+version = "2.1.1"
+description = "execnet: rapid multi-Python deployment"
+optional = false
+python-versions = ">=3.8"
+groups = ["dev"]
+files = [
+ {file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"},
+ {file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"},
+]
+
+[package.extras]
+testing = ["hatch", "pre-commit", "pytest", "tox"]
+
[[package]]
name = "fastavro"
version = "1.10.0"
@@ -1604,6 +1619,27 @@ pytest = ">=7.0.0"
[package.extras]
dev = ["black", "flake8", "isort", "mypy"]
+[[package]]
+name = "pytest-xdist"
+version = "3.6.1"
+description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
+optional = false
+python-versions = ">=3.8"
+groups = ["dev"]
+files = [
+ {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"},
+ {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"},
+]
+
+[package.dependencies]
+execnet = ">=2.1"
+pytest = ">=7.0.0"
+
+[package.extras]
+psutil = ["psutil (>=3.0)"]
+setproctitle = ["setproctitle"]
+testing = ["filelock"]
+
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@@ -2415,4 +2451,4 @@ type = ["pytest-mypy"]
[metadata]
lock-version = "2.1"
python-versions = ">=3.9,<4"
-content-hash = "0c9e19c602590fba318f4385529614dae82433689f41a0cb24e386a2ec0f7aaa"
+content-hash = "d9587a36fb01331acd9d45395ab819f88f3402904bf07c324b8cc2b16f2f5d8a"
diff --git a/pyproject.toml b/pyproject.toml
index edebc9fd..66a4c368 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -80,6 +80,7 @@ replicate = "^1.0.3"
ruff = "^0.5.6"
types-jsonschema = "^4.23.0.20240813"
types-protobuf = "^5.29.1.20250208"
+pytest-xdist = "^3.6.1"
[tool.pytest.ini_options]
testpaths = [ "tests" ]
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 00000000..8ab80e5d
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+addopts = -n auto
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index 5cbb813c..e3419a18 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -1,3 +1,4 @@
+from collections import defaultdict
import inspect
import logging
import types
@@ -99,10 +100,13 @@ def _overload_log(
def overload_call(client: PromptsClient) -> PromptsClient:
- client._call = client.call # type: ignore [attr-defined]
+ if not hasattr(client, "_overloads"):
+ client._overloads = defaultdict(list) # type: ignore [attr-defined]
+ if len(client._overloads["call"]) == 0: # type: ignore [attr-defined]
+ client._overloads["call"].append(client.call) # type: ignore [attr-defined]
def _overload_call(self, **kwargs) -> PromptCallResponse:
- # None if not logging inside a decorator
+ # trace_id is None if logging outside a decorator
trace_id = get_trace_id()
if trace_id is not None:
if "trace_parent_id" in kwargs:
@@ -116,14 +120,13 @@ def _overload_call(self, **kwargs) -> PromptCallResponse:
}
try:
- response = self._call(**kwargs)
+ response = client._overloads["call"][0](**kwargs) # type: ignore [attr-defined]
except Exception as e:
# Re-raising as HumanloopRuntimeError so the decorators don't catch it
raise HumanloopRuntimeError from e
return response
- # Replace the original log method with the overloaded one
client.call = types.MethodType(_overload_call, client) # type: ignore [assignment]
return client
@@ -168,13 +171,34 @@ def overload_with_local_files(
Raises:
HumanloopRuntimeError: If use_local_files is True and local file cannot be accessed
"""
- original_call = client._call if hasattr(client, "_call") else client.call
- original_log = client._log if hasattr(client, "_log") else client.log
+ if not hasattr(client, "_overloads"):
+ client._overloads = defaultdict(list) # type: ignore [union-attr]
+ # If the method has been overloaded, don't re-add the method
+ if isinstance(client, PromptsClient):
+ if len(client._overloads["call"]) == 1: # type: ignore [attr-defined]
+ client._overloads["call"].append(client.call) # type: ignore [attr-defined]
+ else:
+ raise RuntimeError(f"Unexpected overload order of operations for {client}.call")
+ elif isinstance(client, AgentsClient):
+ if len(client._overloads["call"]) == 0:
+ client._overloads["call"].append(client.call)
+ else:
+ raise RuntimeError(f"Unexpected overload order of operations for {client}.call")
+ else:
+ raise NotImplementedError(f"Unsupported client type: {type(client)}")
+ if len(client._overloads["log"]) == 0:
+ client._overloads["log"].append(client.log)
+ else:
+ raise RuntimeError(f"Unexpected overload order of operations for {client}.log")
+
file_type = _get_file_type_from_client(client)
def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
if "id" in kwargs and "path" in kwargs:
- raise HumanloopRuntimeError(f"Can only specify one of `id` or `path` when {function_name}ing a {file_type}")
+ raise HumanloopRuntimeError(
+ "Can only specify one of `id` or `path` when "
+ f"{'logging' if function_name == 'log' else 'calling'} a {file_type}"
+ )
# Handle local files if enabled
if use_local_files and "path" in kwargs:
# Check if version_id or environment is specified
@@ -204,9 +228,9 @@ def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
try:
if function_name == "call":
- return original_call(**kwargs)
+ return client._overloads["call"][1](**kwargs) # type: ignore [attr-defined, union-attr]
elif function_name == "log":
- return original_log(**kwargs)
+ return client._overloads["log"][0](**kwargs) # type: ignore [attr-defined, union-attr]
else:
raise ValueError(f"Unsupported function name: {function_name}")
except Exception as e:
diff --git a/tests/custom/integration/conftest.py b/tests/custom/integration/conftest.py
index 41e881c9..1cba454f 100644
--- a/tests/custom/integration/conftest.py
+++ b/tests/custom/integration/conftest.py
@@ -41,7 +41,10 @@ def humanloop_test_client() -> Humanloop:
dotenv.load_dotenv()
if not os.getenv("HUMANLOOP_API_KEY"):
pytest.fail("HUMANLOOP_API_KEY is not set for integration tests")
- return Humanloop(api_key=os.getenv("HUMANLOOP_API_KEY"))
+ return Humanloop(
+ api_key=os.getenv("HUMANLOOP_API_KEY"),
+ base_url="http://0.0.0.0:80/v5",
+ )
@pytest.fixture(scope="function")
diff --git a/tests/custom/integration/test_decorators.py b/tests/custom/integration/test_decorators.py
index 218453a6..a7e5400f 100644
--- a/tests/custom/integration/test_decorators.py
+++ b/tests/custom/integration/test_decorators.py
@@ -3,7 +3,6 @@
from openai import OpenAI
from humanloop.client import Humanloop
-from humanloop.types.chat_message import ChatMessage
def test_prompt_decorator(
From 8afe4c46e38d3877d59b341c95162879f548279c Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Mon, 12 May 2025 13:53:30 +0100
Subject: [PATCH 20/39] Improve typing of tests
---
src/humanloop/sync/sync_client.py | 2 +-
tests/custom/conftest.py | 23 ++++-
tests/custom/integration/conftest.py | 13 +--
tests/custom/integration/test_sync.py | 116 +++++++++++++++-----------
tests/custom/sync/test_client.py | 76 +++--------------
tests/custom/types.py | 5 ++
6 files changed, 105 insertions(+), 130 deletions(-)
create mode 100644 tests/custom/types.py
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index 260eeaa4..03314406 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -13,7 +13,7 @@
from humanloop.base_client import BaseHumanloop
# Set up logging
-logger = logging.getLogger(__name__)
+logger = logging.getLogger("humanloop.sdk.sync")
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
formatter = logging.Formatter("%(message)s")
diff --git a/tests/custom/conftest.py b/tests/custom/conftest.py
index c1f64bbf..b7ce7bfa 100644
--- a/tests/custom/conftest.py
+++ b/tests/custom/conftest.py
@@ -1,9 +1,10 @@
from typing import Generator
import typing
+import os
+from dotenv import load_dotenv
from unittest.mock import MagicMock
import pytest
-from humanloop.base_client import BaseHumanloop
from humanloop.otel.exporter import HumanloopSpanExporter
from humanloop.otel.processor import HumanloopSpanProcessor
from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
@@ -18,9 +19,10 @@
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
from opentelemetry.trace import Tracer
+from tests.custom.types import GetHumanloopClientFn
if typing.TYPE_CHECKING:
- from humanloop.client import BaseHumanloop
+ from humanloop.client import Humanloop
@pytest.fixture(scope="function")
@@ -80,10 +82,25 @@ def opentelemetry_test_configuration(
instrumentor.uninstrument()
+
+@pytest.fixture(scope="session")
+def get_humanloop_client() -> GetHumanloopClientFn:
+ load_dotenv()
+ if not os.getenv("HUMANLOOP_API_KEY"):
+ pytest.fail("HUMANLOOP_API_KEY is not set for integration tests")
+
+ def _get_humanloop_test_client(use_local_files: bool = False) -> Humanloop:
+ return Humanloop(
+ api_key=os.getenv("HUMANLOOP_API_KEY"),
+ use_local_files=use_local_files,
+ )
+
+ return _get_humanloop_test_client
+
+
@pytest.fixture(scope="function")
def opentelemetry_hl_test_configuration(
opentelemetry_test_provider: TracerProvider,
- humanloop_client: BaseHumanloop,
) -> Generator[tuple[Tracer, InMemorySpanExporter], None, None]:
"""Configure OTel backend with HumanloopSpanProcessor.
diff --git a/tests/custom/integration/conftest.py b/tests/custom/integration/conftest.py
index 1cba454f..9f89648b 100644
--- a/tests/custom/integration/conftest.py
+++ b/tests/custom/integration/conftest.py
@@ -1,3 +1,4 @@
+from typing import Callable
from contextlib import contextmanager, redirect_stdout
from dataclasses import dataclass
import os
@@ -17,6 +18,7 @@ class TestIdentifiers:
file_path: str
+
@pytest.fixture()
def capture_stdout() -> ContextManager[TextIO]:
@contextmanager
@@ -36,17 +38,6 @@ def openai_key() -> str:
return os.getenv("OPENAI_API_KEY") # type: ignore [return-value]
-@pytest.fixture(scope="session")
-def humanloop_test_client() -> Humanloop:
- dotenv.load_dotenv()
- if not os.getenv("HUMANLOOP_API_KEY"):
- pytest.fail("HUMANLOOP_API_KEY is not set for integration tests")
- return Humanloop(
- api_key=os.getenv("HUMANLOOP_API_KEY"),
- base_url="http://0.0.0.0:80/v5",
- )
-
-
@pytest.fixture(scope="function")
def sdk_test_dir(humanloop_test_client: Humanloop) -> Generator[str, None, None]:
def cleanup_directory(directory_id: str):
diff --git a/tests/custom/integration/test_sync.py b/tests/custom/integration/test_sync.py
index 348bcb7e..b102588c 100644
--- a/tests/custom/integration/test_sync.py
+++ b/tests/custom/integration/test_sync.py
@@ -2,7 +2,11 @@
from pathlib import Path
import pytest
from humanloop import Humanloop, FileType, AgentResponse, PromptResponse
+from humanloop.prompts.client import PromptsClient
+from humanloop.agents.client import AgentsClient
from humanloop.error import HumanloopRuntimeError
+from tests.custom.types import GetHumanloopClientFn
+import logging
class SyncableFile(NamedTuple):
@@ -15,7 +19,8 @@ class SyncableFile(NamedTuple):
@pytest.fixture
def test_file_structure(
- humanloop_test_client: Humanloop, sdk_test_dir: str
+ get_humanloop_client: GetHumanloopClientFn,
+ sdk_test_dir: str,
) -> Generator[list[SyncableFile], None, None]:
"""Creates a predefined structure of files in Humanloop for testing sync"""
files: List[SyncableFile] = [
@@ -46,18 +51,20 @@ def test_file_structure(
),
]
+ humanloop_client: Humanloop = get_humanloop_client()
+
# Create the files in Humanloop
created_files = []
for file in files:
full_path = f"{sdk_test_dir}/{file.path}"
response: Union[AgentResponse, PromptResponse]
if file.type == "prompt":
- response = humanloop_test_client.prompts.upsert(
+ response = humanloop_client.prompts.upsert(
path=full_path,
model=file.model,
)
elif file.type == "agent":
- response = humanloop_test_client.agents.upsert(
+ response = humanloop_client.agents.upsert(
path=full_path,
model=file.model,
)
@@ -82,10 +89,14 @@ def cleanup_local_files():
shutil.rmtree(local_dir)
-def test_pull_basic(humanloop_test_client: Humanloop, test_file_structure: List[SyncableFile]):
+def test_pull_basic(
+ get_humanloop_client: GetHumanloopClientFn,
+ test_file_structure: List[SyncableFile],
+):
"""Test that humanloop.sync() correctly syncs remote files to local filesystem"""
# Run the sync
- successful_files = humanloop_test_client.pull()
+ humanloop_client = get_humanloop_client()
+ successful_files = humanloop_client.pull()
# Verify each file was synced correctly
for file in test_file_structure:
@@ -105,7 +116,7 @@ def test_pull_basic(humanloop_test_client: Humanloop, test_file_structure: List[
def test_overload_with_local_files(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
test_file_structure: List[SyncableFile],
):
"""Test that overload_with_local_files correctly handles local files.
@@ -116,7 +127,8 @@ def test_overload_with_local_files(
3. Test using the pulled files
"""
# First pull the files locally
- humanloop_test_client.pull()
+ humanloop_client = get_humanloop_client(use_local_files=True)
+ humanloop_client.pull()
# Test using the pulled files
test_file = test_file_structure[0] # Use the first test file
@@ -130,30 +142,31 @@ def test_overload_with_local_files(
# Test call with pulled file
response: Union[AgentResponse, PromptResponse]
if test_file.type == "prompt":
- response = humanloop_test_client.prompts.call( # type: ignore [assignment]
+ response = humanloop_client.prompts.call( # type: ignore [assignment]
path=test_file.path, messages=[{"role": "user", "content": "Testing"}]
)
assert response is not None
elif test_file.type == "agent":
- response = humanloop_test_client.agents.call( # type: ignore [assignment]
+ response = humanloop_client.agents.call( # type: ignore [assignment]
path=test_file.path, messages=[{"role": "user", "content": "Testing"}]
)
assert response is not None
# Test with invalid path
with pytest.raises(HumanloopRuntimeError):
+ sub_client: Union[PromptsClient, AgentsClient]
match test_file.type:
case "prompt":
- sub_agent = humanloop_test_client.agents
+ sub_client = humanloop_client.prompts
case "agent":
- sub_agent = humanloop_test_client.agents
+ sub_client = humanloop_client.agents
case _:
raise ValueError(f"Invalid file type: {test_file.type}")
- sub_agent.call(path="invalid/path")
+ sub_client.call(path="invalid/path")
def test_overload_log_with_local_files(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
test_file_structure: List[SyncableFile],
sdk_test_dir: str,
):
@@ -169,7 +182,8 @@ def test_overload_log_with_local_files(
:param cleanup_local_files: Fixture to clean up local files after test
"""
# First pull the files locally
- humanloop_test_client.pull()
+ humanloop_client = get_humanloop_client(use_local_files=True)
+ humanloop_client.pull()
# Test using the pulled files
test_file = test_file_structure[0] # Use the first test file
@@ -182,12 +196,12 @@ def test_overload_log_with_local_files(
# Test log with pulled file
if test_file.type == "prompt":
- response = humanloop_test_client.prompts.log( # type: ignore [assignment]
+ response = humanloop_client.prompts.log( # type: ignore [assignment]
path=test_file.path, messages=[{"role": "user", "content": "Testing"}], output="Test response"
)
assert response is not None
elif test_file.type == "agent":
- response = humanloop_test_client.agents.log( # type: ignore [assignment]
+ response = humanloop_client.agents.log( # type: ignore [assignment]
path=test_file.path, messages=[{"role": "user", "content": "Testing"}], output="Test response"
)
assert response is not None
@@ -195,13 +209,13 @@ def test_overload_log_with_local_files(
# Test with invalid path
with pytest.raises(HumanloopRuntimeError):
if test_file.type == "prompt":
- humanloop_test_client.prompts.log(
+ humanloop_client.prompts.log(
path=f"{sdk_test_dir}/invalid/path",
messages=[{"role": "user", "content": "Testing"}],
output="Test response",
)
elif test_file.type == "agent":
- humanloop_test_client.agents.log(
+ humanloop_client.agents.log(
path=f"{sdk_test_dir}/invalid/path",
messages=[{"role": "user", "content": "Testing"}],
output="Test response",
@@ -209,7 +223,9 @@ def test_overload_log_with_local_files(
def test_overload_version_environment_handling(
- humanloop_test_client: Humanloop, test_file_structure: List[SyncableFile]
+ caplog: pytest.LogCaptureFixture,
+ get_humanloop_client: GetHumanloopClientFn,
+ test_file_structure: List[SyncableFile],
):
"""Test that overload_with_local_files correctly handles version_id and environment parameters.
@@ -219,7 +235,8 @@ def test_overload_version_environment_handling(
3. Test that version_id/environment parameters cause remote usage with warning
"""
# First pull the files locally
- humanloop_test_client.pull()
+ humanloop_client = get_humanloop_client(use_local_files=True)
+ humanloop_client.pull()
# Test using the pulled files
test_file = test_file_structure[0] # Use the first test file
@@ -231,47 +248,46 @@ def test_overload_version_environment_handling(
assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
# Test with version_id - should use remote with warning
- with pytest.warns(UserWarning, match="Ignoring local file.*as version_id or environment was specified"):
+ # Check that the warning was logged
+ with caplog.at_level(level=logging.WARNING, logger="humanloop.sdk"):
if test_file.type == "prompt":
- response = humanloop_test_client.prompts.call( # type: ignore [assignment]
+ response = humanloop_client.prompts.call(
path=test_file.path,
version_id=test_file.version_id,
messages=[{"role": "user", "content": "Testing"}],
)
elif test_file.type == "agent":
- response = humanloop_test_client.agents.call( # type: ignore [assignment]
+ response = humanloop_client.agents.call( # type: ignore [assignment]
path=test_file.path,
version_id=test_file.version_id,
messages=[{"role": "user", "content": "Testing"}],
)
assert response is not None
+ assert any(["Ignoring local file" in record.message for record in caplog.records])
# Test with environment - should use remote with warning
- with pytest.warns(UserWarning, match="Ignoring local file.*as version_id or environment was specified"):
- if test_file.type == "prompt":
- response = humanloop_test_client.prompts.call( # type: ignore [assignment]
- path=test_file.path, environment="production", messages=[{"role": "user", "content": "Testing"}]
- )
- elif test_file.type == "agent":
- response = humanloop_test_client.agents.call( # type: ignore [assignment]
- path=test_file.path, environment="production", messages=[{"role": "user", "content": "Testing"}]
- )
- assert response is not None
+ if test_file.type == "prompt":
+ response = humanloop_client.prompts.call( # type: ignore [assignment]
+ path=test_file.path, environment="production", messages=[{"role": "user", "content": "Testing"}]
+ )
+ elif test_file.type == "agent":
+ response = humanloop_client.agents.call( # type: ignore [assignment]
+ path=test_file.path, environment="production", messages=[{"role": "user", "content": "Testing"}]
+ )
+ assert response is not None
- # Test with both version_id and environment - should use remote with warning
- with pytest.warns(UserWarning, match="Ignoring local file.*as version_id or environment was specified"):
- if test_file.type == "prompt":
- response = humanloop_test_client.prompts.call( # type: ignore [assignment]
- path=test_file.path,
- version_id=test_file.version_id,
- environment="staging",
- messages=[{"role": "user", "content": "Testing"}],
- )
- elif test_file.type == "agent":
- response = humanloop_test_client.agents.call( # type: ignore [assignment]
- path=test_file.path,
- version_id=test_file.version_id,
- environment="staging",
- messages=[{"role": "user", "content": "Testing"}],
- )
- assert response is not None
+ if test_file.type == "prompt":
+ response = humanloop_client.prompts.call( # type: ignore [assignment]
+ path=test_file.path,
+ version_id=test_file.version_id,
+ environment="staging",
+ messages=[{"role": "user", "content": "Testing"}],
+ )
+ elif test_file.type == "agent":
+ response = humanloop_client.agents.call( # type: ignore [assignment]
+ path=test_file.path,
+ version_id=test_file.version_id,
+ environment="staging",
+ messages=[{"role": "user", "content": "Testing"}],
+ )
+ assert response is not None
diff --git a/tests/custom/sync/test_client.py b/tests/custom/sync/test_client.py
index 79168223..2721ec9f 100644
--- a/tests/custom/sync/test_client.py
+++ b/tests/custom/sync/test_client.py
@@ -1,17 +1,18 @@
import logging
import pytest
+from pathlib import Path
from unittest.mock import Mock, patch
from humanloop.sync.sync_client import SyncClient
from humanloop.error import HumanloopRuntimeError
@pytest.fixture
-def mock_client():
+def mock_client() -> Mock:
return Mock()
@pytest.fixture
-def sync_client(mock_client, tmp_path):
+def sync_client(mock_client: Mock, tmp_path: Path) -> SyncClient:
return SyncClient(
client=mock_client,
base_dir=str(tmp_path),
@@ -20,14 +21,14 @@ def sync_client(mock_client, tmp_path):
)
-def test_init(sync_client, tmp_path):
+def test_init(sync_client: SyncClient, tmp_path: Path):
"""Test basic initialization of SyncClient."""
assert sync_client.base_dir == tmp_path
assert sync_client._cache_size == 10
assert sync_client.SERIALIZABLE_FILE_TYPES == ["prompt", "agent"]
-def test_normalize_path(sync_client):
+def test_normalize_path(sync_client: SyncClient):
"""Test path normalization functionality."""
test_cases = [
("path/to/file.prompt", "path/to/file"),
@@ -41,7 +42,7 @@ def test_normalize_path(sync_client):
assert sync_client._normalize_path(input_path) == expected
-def test_is_file(sync_client):
+def test_is_file(sync_client: SyncClient):
"""Test file type detection."""
assert sync_client.is_file("test.prompt")
assert sync_client.is_file("test.agent")
@@ -49,14 +50,14 @@ def test_is_file(sync_client):
assert not sync_client.is_file("test")
-def test_save_and_read_file(sync_client):
+def test_save_and_read_file(sync_client: SyncClient):
"""Test saving and reading files."""
content = "test content"
path = "test/path"
file_type = "prompt"
# Test saving
- sync_client._save_serialized_file(content, path, file_type)
+ sync_client._save_serialized_file(content, path, "prompt")
saved_path = sync_client.base_dir / path
saved_path = saved_path.parent / f"{saved_path.stem}.{file_type}"
assert saved_path.exists()
@@ -66,62 +67,7 @@ def test_save_and_read_file(sync_client):
assert read_content == content
-def test_pull_file(sync_client, mock_client):
- """Test pulling a single file."""
- mock_file = Mock()
- mock_file.type = "prompt"
- mock_file.path = "test/path"
- mock_file.raw_file_content = "test content"
-
- mock_client.files.retrieve_by_path.return_value = mock_file
-
- success = sync_client._pull_file("test/path.prompt")
- assert success
- assert mock_client.files.retrieve_by_path.called
-
- # Verify file was saved
- saved_path = sync_client.base_dir / "test/path.prompt"
- assert saved_path.exists()
- assert saved_path.read_text() == "test content"
-
-
-def test_pull_directory(sync_client, mock_client):
- """Test pulling multiple files from a directory."""
-
- # Create mock responses for different pages
- def mock_list_files(*args, **kwargs):
- page = kwargs.get("page", 1)
- mock_response = Mock()
-
- if page == 1:
- mock_response.records = [
- Mock(type="prompt", path="test/path1", raw_file_content="content1"),
- Mock(type="agent", path="test/path2", raw_file_content="content2"),
- ]
- else:
- # Return empty list for subsequent pages
- mock_response.records = []
-
- return mock_response
-
- # Set up the mock to use our function
- mock_client.files.list_files.side_effect = mock_list_files
-
- successful, failed = sync_client._pull_directory("test")
- assert len(successful) == 2
- assert len(failed) == 0
-
- # Verify files were saved
- assert (sync_client.base_dir / "test/path1.prompt").exists()
- assert (sync_client.base_dir / "test/path2.agent").exists()
-
- # Verify the mock was called with correct parameters
- mock_client.files.list_files.assert_any_call(
- type=["prompt", "agent"], page=1, include_raw_file_content=True, environment=None, path="test"
- )
-
-
-def test_error_handling(sync_client):
+def test_error_handling(sync_client: SyncClient):
"""Test error handling in various scenarios."""
# Test file not found
with pytest.raises(HumanloopRuntimeError, match="Local file not found"):
@@ -136,13 +82,13 @@ def test_error_handling(sync_client):
assert not sync_client._pull_file("test.prompt")
-def test_cache_functionality(sync_client):
+def test_cache_functionality(sync_client: SyncClient):
"""Test LRU cache functionality."""
# Save a test file
content = "test content"
path = "test/path"
file_type = "prompt"
- sync_client._save_serialized_file(content, path, file_type)
+    sync_client._save_serialized_file(content, path, file_type)  # type: ignore[arg-type]  # file_type is a plain str here, not the Literal the signature expects
# First read should hit disk
sync_client.get_file_content(path, file_type)
diff --git a/tests/custom/types.py b/tests/custom/types.py
new file mode 100644
index 00000000..98de8c38
--- /dev/null
+++ b/tests/custom/types.py
@@ -0,0 +1,5 @@
+from typing import Protocol
+from humanloop.client import Humanloop
+
+class GetHumanloopClientFn(Protocol):
+ def __call__(self, use_local_files: bool = False) -> Humanloop: ...
From 7dd6c5bccec5957df36b02b49a7c8f059f79962f Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Mon, 12 May 2025 14:12:46 +0100
Subject: [PATCH 21/39] refactor: use callable function as fixture instead of
passing in the client directly
---
src/humanloop/overload.py | 1 +
tests/custom/conftest.py | 7 +-
tests/custom/integration/conftest.py | 48 +++++----
tests/custom/integration/test_decorators.py | 70 ++++++-------
tests/custom/integration/test_evals.py | 106 +++++++++++---------
tests/custom/integration/test_sync.py | 70 ++++++-------
6 files changed, 157 insertions(+), 145 deletions(-)
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index e3419a18..c2cd9e85 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -206,6 +206,7 @@ def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
normalized_path = sync_client._normalize_path(kwargs["path"])
if use_remote:
+ # Don't allow user to specify path + version_id/environment because it's ambiguous
raise HumanloopRuntimeError(
f"Cannot use local file for `{normalized_path}` as version_id or environment was specified. "
"Please either remove version_id/environment to use local files, or set use_local_files=False to use remote files."
diff --git a/tests/custom/conftest.py b/tests/custom/conftest.py
index b7ce7bfa..b4387550 100644
--- a/tests/custom/conftest.py
+++ b/tests/custom/conftest.py
@@ -1,10 +1,10 @@
from typing import Generator
-import typing
import os
from dotenv import load_dotenv
from unittest.mock import MagicMock
import pytest
+from humanloop.client import Humanloop
from humanloop.otel.exporter import HumanloopSpanExporter
from humanloop.otel.processor import HumanloopSpanProcessor
from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
@@ -21,9 +21,6 @@
from opentelemetry.trace import Tracer
from tests.custom.types import GetHumanloopClientFn
-if typing.TYPE_CHECKING:
- from humanloop.client import Humanloop
-
@pytest.fixture(scope="function")
def opentelemetry_test_provider() -> TracerProvider:
@@ -82,7 +79,6 @@ def opentelemetry_test_configuration(
instrumentor.uninstrument()
-
@pytest.fixture(scope="session")
def get_humanloop_client() -> GetHumanloopClientFn:
load_dotenv()
@@ -92,6 +88,7 @@ def get_humanloop_client() -> GetHumanloopClientFn:
def _get_humanloop_test_client(use_local_files: bool = False) -> Humanloop:
return Humanloop(
api_key=os.getenv("HUMANLOOP_API_KEY"),
+ base_url="http://localhost:80/v5",
use_local_files=use_local_files,
)
diff --git a/tests/custom/integration/conftest.py b/tests/custom/integration/conftest.py
index 9f89648b..3ad0364f 100644
--- a/tests/custom/integration/conftest.py
+++ b/tests/custom/integration/conftest.py
@@ -1,4 +1,3 @@
-from typing import Callable
from contextlib import contextmanager, redirect_stdout
from dataclasses import dataclass
import os
@@ -10,6 +9,7 @@
import pytest
import dotenv
from humanloop.client import Humanloop
+from tests.custom.types import GetHumanloopClientFn
@dataclass
@@ -18,7 +18,6 @@ class TestIdentifiers:
file_path: str
-
@pytest.fixture()
def capture_stdout() -> ContextManager[TextIO]:
@contextmanager
@@ -39,31 +38,32 @@ def openai_key() -> str:
@pytest.fixture(scope="function")
-def sdk_test_dir(humanloop_test_client: Humanloop) -> Generator[str, None, None]:
+def sdk_test_dir(get_humanloop_client: GetHumanloopClientFn) -> Generator[str, None, None]:
+ humanloop_client = get_humanloop_client()
def cleanup_directory(directory_id: str):
- directory_response = humanloop_test_client.directories.get(id=directory_id)
+ directory_response = humanloop_client.directories.get(id=directory_id)
for subdirectory in directory_response.subdirectories:
cleanup_directory(subdirectory.id)
for file in directory_response.files:
match file.type:
case "prompt":
- humanloop_test_client.prompts.delete(id=file.id)
+ humanloop_client.prompts.delete(id=file.id)
case "agent":
- humanloop_test_client.agents.delete(id=file.id)
+ humanloop_client.agents.delete(id=file.id)
case "dataset":
- humanloop_test_client.datasets.delete(id=file.id)
+ humanloop_client.datasets.delete(id=file.id)
case "evaluator":
- humanloop_test_client.evaluators.delete(id=file.id)
+ humanloop_client.evaluators.delete(id=file.id)
case "flow":
- humanloop_test_client.flows.delete(id=file.id)
+ humanloop_client.flows.delete(id=file.id)
case _:
raise ValueError(f"Unknown file type: {file.type}")
- humanloop_test_client.directories.delete(id=directory_response.id)
+ humanloop_client.directories.delete(id=directory_response.id)
path = f"SDK_INTEGRATION_TEST_{uuid.uuid4()}"
response = None
try:
- response = humanloop_test_client.directories.create(path=path)
+ response = humanloop_client.directories.create(path=path)
yield response.path
except Exception as e:
pytest.fail(f"Failed to create directory {path}: {e}")
@@ -93,10 +93,11 @@ def test_prompt_config() -> dict[str, Any]:
@pytest.fixture(scope="function")
-def eval_dataset(humanloop_test_client: Humanloop, sdk_test_dir: str) -> Generator[TestIdentifiers, None, None]:
+def eval_dataset(get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str) -> Generator[TestIdentifiers, None, None]:
+ humanloop_client = get_humanloop_client()
dataset_path = f"{sdk_test_dir}/eval_dataset"
try:
- response = humanloop_test_client.datasets.upsert(
+ response = humanloop_client.datasets.upsert(
path=dataset_path,
datapoints=[
{
@@ -117,34 +118,36 @@ def eval_dataset(humanloop_test_client: Humanloop, sdk_test_dir: str) -> Generat
],
)
yield TestIdentifiers(file_id=response.id, file_path=response.path)
- humanloop_test_client.datasets.delete(id=response.id)
+ humanloop_client.datasets.delete(id=response.id)
except Exception as e:
pytest.fail(f"Failed to create dataset {dataset_path}: {e}")
@pytest.fixture(scope="function")
def eval_prompt(
- humanloop_test_client: Humanloop, sdk_test_dir: str, openai_key: str, test_prompt_config: dict[str, Any]
+ get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str, openai_key: str, test_prompt_config: dict[str, Any]
) -> Generator[TestIdentifiers, None, None]:
+ humanloop_client = get_humanloop_client()
prompt_path = f"{sdk_test_dir}/eval_prompt"
try:
- response = humanloop_test_client.prompts.upsert(
+ response = humanloop_client.prompts.upsert(
path=prompt_path,
**test_prompt_config,
)
yield TestIdentifiers(file_id=response.id, file_path=response.path)
- humanloop_test_client.prompts.delete(id=response.id)
+ humanloop_client.prompts.delete(id=response.id)
except Exception as e:
pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
@pytest.fixture(scope="function")
def output_not_null_evaluator(
- humanloop_test_client: Humanloop, sdk_test_dir: str
+ get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str
) -> Generator[TestIdentifiers, None, None]:
+ humanloop_client = get_humanloop_client()
evaluator_path = f"{sdk_test_dir}/output_not_null_evaluator"
try:
- response = humanloop_test_client.evaluators.upsert(
+ response = humanloop_client.evaluators.upsert(
path=evaluator_path,
spec={
"arguments_type": "target_required",
@@ -157,14 +160,15 @@ def output_not_null(log: dict) -> bool:
},
)
yield TestIdentifiers(file_id=response.id, file_path=response.path)
- humanloop_test_client.evaluators.delete(id=response.id)
+ humanloop_client.evaluators.delete(id=response.id)
except Exception as e:
pytest.fail(f"Failed to create evaluator {evaluator_path}: {e}")
@pytest.fixture(scope="function")
-def id_for_staging_environment(humanloop_test_client: Humanloop, eval_prompt: TestIdentifiers) -> str:
- response = humanloop_test_client.prompts.list_environments(id=eval_prompt.file_id)
+def id_for_staging_environment(get_humanloop_client: GetHumanloopClientFn, eval_prompt: TestIdentifiers) -> str:
+ humanloop_client = get_humanloop_client()
+ response = humanloop_client.prompts.list_environments(id=eval_prompt.file_id)
for environment in response:
if environment.name == "staging":
return environment.id
diff --git a/tests/custom/integration/test_decorators.py b/tests/custom/integration/test_decorators.py
index a7e5400f..15057ba2 100644
--- a/tests/custom/integration/test_decorators.py
+++ b/tests/custom/integration/test_decorators.py
@@ -2,26 +2,27 @@
from typing import Any
from openai import OpenAI
-from humanloop.client import Humanloop
+from tests.custom.integration.conftest import GetHumanloopClientFn
def test_prompt_decorator(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
sdk_test_dir: str,
test_prompt_config: dict[str, Any],
openai_key: str,
):
try:
+ humanloop_client = get_humanloop_client()
prompt_path = f"{sdk_test_dir}/test_prompt"
- prompt_response = humanloop_test_client.prompts.upsert(
+ prompt_response = humanloop_client.prompts.upsert(
path=prompt_path,
**test_prompt_config,
)
- prompt_versions_response = humanloop_test_client.prompts.list_versions(id=prompt_response.id)
+ prompt_versions_response = humanloop_client.prompts.list_versions(id=prompt_response.id)
assert len(prompt_versions_response.records) == 1
- @humanloop_test_client.prompt(path=prompt_path)
+ @humanloop_client.prompt(path=prompt_path)
def my_prompt(question: str) -> str:
openai_client = OpenAI(api_key=openai_key)
@@ -36,26 +37,27 @@ def my_prompt(question: str) -> str:
assert "paris" in my_prompt("What is the capital of the France?").lower()
time.sleep(5)
- prompt_versions_response = humanloop_test_client.prompts.list_versions(id=prompt_response.id)
+ prompt_versions_response = humanloop_client.prompts.list_versions(id=prompt_response.id)
assert len(prompt_versions_response.records) == 2
- logs_response = humanloop_test_client.logs.list(file_id=prompt_response.id, page=1, size=50)
+ logs_response = humanloop_client.logs.list(file_id=prompt_response.id, page=1, size=50)
assert logs_response.items is not None and len(logs_response.items) == 1
finally:
- humanloop_test_client.prompts.delete(id=prompt_response.id)
+ humanloop_client.prompts.delete(id=prompt_response.id)
def test_call_prompt_in_flow_decorator(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
sdk_test_dir: str,
openai_key: str,
):
try:
+ humanloop_client = get_humanloop_client()
- @humanloop_test_client.flow(path=f"{sdk_test_dir}/test_flow")
+ @humanloop_client.flow(path=f"{sdk_test_dir}/test_flow")
def my_flow(question: str) -> str:
- response = humanloop_test_client.prompts.call(
+ response = humanloop_client.prompts.call(
path=f"{sdk_test_dir}/test_prompt",
prompt={
"provider": "openai",
@@ -71,34 +73,35 @@ def my_flow(question: str) -> str:
assert "paris" in my_flow("What is the capital of the France?").lower()
time.sleep(5)
- prompt_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_prompt")
+ prompt_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_prompt")
assert prompt_response is not None
- prompt_logs_response = humanloop_test_client.logs.list(file_id=prompt_response.id, page=1, size=50)
+ prompt_logs_response = humanloop_client.logs.list(file_id=prompt_response.id, page=1, size=50)
assert prompt_logs_response.items is not None and len(prompt_logs_response.items) == 1
prompt_log = prompt_logs_response.items[0]
- flow_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow")
+ flow_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow")
assert flow_response is not None
- flow_logs_response = humanloop_test_client.logs.list(file_id=flow_response.id, page=1, size=50)
+ flow_logs_response = humanloop_client.logs.list(file_id=flow_response.id, page=1, size=50)
assert flow_logs_response.items is not None and len(flow_logs_response.items) == 1
flow_log = flow_logs_response.items[0]
assert prompt_log.trace_parent_id == flow_log.id
finally:
- flow_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow")
+ flow_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow")
if flow_response is not None:
- humanloop_test_client.flows.delete(id=flow_response.id)
- prompt_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_prompt")
+ humanloop_client.flows.delete(id=flow_response.id)
+ prompt_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_prompt")
if prompt_response is not None:
- humanloop_test_client.prompts.delete(id=prompt_response.id)
+ humanloop_client.prompts.delete(id=prompt_response.id)
def test_flow_decorator_logs_exceptions(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
sdk_test_dir: str,
):
try:
+ humanloop_client = get_humanloop_client()
- @humanloop_test_client.flow(path=f"{sdk_test_dir}/test_flow_log_error")
+ @humanloop_client.flow(path=f"{sdk_test_dir}/test_flow_log_error")
def my_flow(question: str) -> str:
raise ValueError("This is a test exception")
@@ -106,27 +109,28 @@ def my_flow(question: str) -> str:
time.sleep(5)
- flow_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow_log_error")
+ flow_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow_log_error")
assert flow_response is not None
- flow_logs_response = humanloop_test_client.logs.list(file_id=flow_response.id, page=1, size=50)
+ flow_logs_response = humanloop_client.logs.list(file_id=flow_response.id, page=1, size=50)
assert flow_logs_response.items is not None and len(flow_logs_response.items) == 1
flow_log = flow_logs_response.items[0]
assert flow_log.error is not None
assert flow_log.output is None
finally:
- flow_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow_log_error")
+ flow_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow_log_error")
if flow_response is not None:
- humanloop_test_client.flows.delete(id=flow_response.id)
+ humanloop_client.flows.delete(id=flow_response.id)
def test_flow_decorator_populates_output_message(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
sdk_test_dir: str,
):
try:
+ humanloop_client = get_humanloop_client()
- @humanloop_test_client.flow(path=f"{sdk_test_dir}/test_flow_log_output_message")
+ @humanloop_client.flow(path=f"{sdk_test_dir}/test_flow_log_output_message")
def my_flow(question: str) -> dict[str, Any]:
return {"role": "user", "content": question}
@@ -134,11 +138,9 @@ def my_flow(question: str) -> dict[str, Any]:
time.sleep(5)
- flow_response = humanloop_test_client.files.retrieve_by_path(
- path=f"{sdk_test_dir}/test_flow_log_output_message"
- )
+ flow_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow_log_output_message")
assert flow_response is not None
- flow_logs_response = humanloop_test_client.logs.list(file_id=flow_response.id, page=1, size=50)
+ flow_logs_response = humanloop_client.logs.list(file_id=flow_response.id, page=1, size=50)
assert flow_logs_response.items is not None and len(flow_logs_response.items) == 1
flow_log = flow_logs_response.items[0]
assert flow_log.output_message is not None
@@ -146,8 +148,6 @@ def my_flow(question: str) -> dict[str, Any]:
assert flow_log.error is None
finally:
- flow_response = humanloop_test_client.files.retrieve_by_path(
- path=f"{sdk_test_dir}/test_flow_log_output_message"
- )
+ flow_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow_log_output_message")
if flow_response is not None:
- humanloop_test_client.flows.delete(id=flow_response.id)
+ humanloop_client.flows.delete(id=flow_response.id)
diff --git a/tests/custom/integration/test_evals.py b/tests/custom/integration/test_evals.py
index be959649..3c615466 100644
--- a/tests/custom/integration/test_evals.py
+++ b/tests/custom/integration/test_evals.py
@@ -2,18 +2,19 @@
from typing import Any
import pytest
-from humanloop.client import Humanloop
from humanloop.error import HumanloopRuntimeError
from tests.custom.integration.conftest import TestIdentifiers
+from tests.custom.types import GetHumanloopClientFn
def test_eval_run_works_on_online_files(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
output_not_null_evaluator: TestIdentifiers,
eval_dataset: TestIdentifiers,
eval_prompt: TestIdentifiers,
) -> None:
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client = get_humanloop_client()
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"path": eval_prompt.file_path,
@@ -29,29 +30,30 @@ def test_eval_run_works_on_online_files(
],
)
time.sleep(5)
- response = humanloop_test_client.evaluations.list(file_id=eval_prompt.file_id)
+ response = humanloop_client.evaluations.list(file_id=eval_prompt.file_id)
assert response.items and len(response.items) == 1
evaluation_id = response.items[0].id
- run_evaluation_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id) # type: ignore [attr-defined]
+ run_evaluation_response = humanloop_client.evaluations.list_runs_for_evaluation(id=evaluation_id) # type: ignore [attr-defined]
assert run_evaluation_response.runs[0].status == "completed"
def test_eval_run_version_id(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
output_not_null_evaluator: TestIdentifiers,
eval_dataset: TestIdentifiers,
eval_prompt: TestIdentifiers,
test_prompt_config: dict[str, Any],
) -> None:
+ humanloop_client = get_humanloop_client()
# GIVEN a prompt where a non-default version is created
new_test_prompt_config = test_prompt_config.copy()
new_test_prompt_config["temperature"] = 1
- new_prompt_version_response = humanloop_test_client.prompts.upsert(
+ new_prompt_version_response = humanloop_client.prompts.upsert(
path=eval_prompt.file_path,
**new_test_prompt_config,
)
# WHEN creating an evaluation using version_id
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"id": new_prompt_version_response.id,
@@ -68,44 +70,45 @@ def test_eval_run_version_id(
],
)
# THEN we evaluate the version created in the test
- evaluations_response = humanloop_test_client.evaluations.list(file_id=new_prompt_version_response.id)
+ evaluations_response = humanloop_client.evaluations.list(file_id=new_prompt_version_response.id)
assert evaluations_response.items and len(evaluations_response.items) == 1
evaluation_id = evaluations_response.items[0].id
- runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
+ runs_response = humanloop_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
assert runs_response.runs[0].status == "completed"
assert (
runs_response.runs[0].version
and runs_response.runs[0].version.version_id == new_prompt_version_response.version_id
)
- list_versions_response = humanloop_test_client.prompts.list_versions(id=new_prompt_version_response.id)
+ list_versions_response = humanloop_client.prompts.list_versions(id=new_prompt_version_response.id)
assert list_versions_response.records and len(list_versions_response.records) == 2
# THEN the version used in evaluation is not the default version
- response = humanloop_test_client.prompts.get(id=new_prompt_version_response.id)
+ response = humanloop_client.prompts.get(id=new_prompt_version_response.id)
assert response.version_id != new_prompt_version_response.version_id
def test_eval_run_environment(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
output_not_null_evaluator: TestIdentifiers,
eval_dataset: TestIdentifiers,
eval_prompt: TestIdentifiers,
test_prompt_config: dict[str, Any],
id_for_staging_environment: str,
) -> None:
+ humanloop_client = get_humanloop_client()
# GIVEN a prompt deployed to staging environment
new_test_prompt_config = test_prompt_config.copy()
new_test_prompt_config["temperature"] = 1
- new_prompt_version_response = humanloop_test_client.prompts.upsert(
+ new_prompt_version_response = humanloop_client.prompts.upsert(
path=eval_prompt.file_path,
**new_test_prompt_config,
)
- humanloop_test_client.prompts.set_deployment(
+ humanloop_client.prompts.set_deployment(
id=new_prompt_version_response.id,
environment_id=id_for_staging_environment,
version_id=new_prompt_version_response.version_id,
)
# WHEN creating an evaluation using environment
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"id": new_prompt_version_response.id,
@@ -122,22 +125,22 @@ def test_eval_run_environment(
],
)
# THEN evaluation is done with the version deployed to staging environment
- evaluations_response = humanloop_test_client.evaluations.list(file_id=new_prompt_version_response.id)
+ evaluations_response = humanloop_client.evaluations.list(file_id=new_prompt_version_response.id)
assert evaluations_response.items and len(evaluations_response.items) == 1
evaluation_id = evaluations_response.items[0].id
- runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
+ runs_response = humanloop_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
assert runs_response.runs[0].status == "completed"
assert (
runs_response.runs[0].version
and runs_response.runs[0].version.version_id == new_prompt_version_response.version_id
)
- default_prompt_version_response = humanloop_test_client.prompts.get(id=new_prompt_version_response.id)
+ default_prompt_version_response = humanloop_client.prompts.get(id=new_prompt_version_response.id)
assert default_prompt_version_response.version_id != new_prompt_version_response.version_id
@pytest.mark.parametrize("version_lookup", ["version_id", "environment"])
def test_eval_run_version_lookup_fails_with_path(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
eval_prompt: TestIdentifiers,
eval_dataset: TestIdentifiers,
output_not_null_evaluator: TestIdentifiers,
@@ -145,7 +148,8 @@ def test_eval_run_version_lookup_fails_with_path(
):
# GIVEN an eval run where we try to evaluate a non-default version
with pytest.raises(HumanloopRuntimeError) as e:
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client = get_humanloop_client()
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"path": eval_prompt.file_path,
@@ -167,13 +171,14 @@ def test_eval_run_version_lookup_fails_with_path(
def test_eval_run_with_version_upsert(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
eval_prompt: TestIdentifiers,
eval_dataset: TestIdentifiers,
output_not_null_evaluator: TestIdentifiers,
test_prompt_config: dict[str, Any],
):
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client = get_humanloop_client()
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"path": eval_prompt.file_path,
@@ -193,23 +198,24 @@ def test_eval_run_with_version_upsert(
],
)
# THEN the version is upserted and evaluation finishes successfully
- evaluations_response = humanloop_test_client.evaluations.list(file_id=eval_prompt.file_id)
+ evaluations_response = humanloop_client.evaluations.list(file_id=eval_prompt.file_id)
assert evaluations_response.items and len(evaluations_response.items) == 1
evaluation_id = evaluations_response.items[0].id
- runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
+ runs_response = humanloop_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
assert runs_response.runs[0].status == "completed"
# THEN a version was upserted based on file.version
- list_prompt_versions_response = humanloop_test_client.prompts.list_versions(id=eval_prompt.file_id)
+ list_prompt_versions_response = humanloop_client.prompts.list_versions(id=eval_prompt.file_id)
assert list_prompt_versions_response.records and len(list_prompt_versions_response.records) == 2
def test_flow_eval_does_not_work_without_callable(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
eval_dataset: TestIdentifiers,
output_not_null_evaluator: TestIdentifiers,
):
with pytest.raises(HumanloopRuntimeError) as e:
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client = get_humanloop_client()
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"path": "Test Flow",
@@ -234,28 +240,29 @@ def test_flow_eval_does_not_work_without_callable(
def test_flow_eval_works_with_callable(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
eval_dataset: TestIdentifiers,
output_not_null_evaluator: TestIdentifiers,
sdk_test_dir: str,
):
+ humanloop_client = get_humanloop_client()
flow_path = f"{sdk_test_dir}/Test Flow"
# GIVEN a flow with a callable
- flow_response = humanloop_test_client.flows.upsert(
+ flow_response = humanloop_client.flows.upsert(
path=flow_path,
attributes={
"foo": "bar",
},
)
try:
- flow = humanloop_test_client.flows.upsert(
+ flow = humanloop_client.flows.upsert(
path=flow_path,
attributes={
"foo": "bar",
},
)
# WHEN we run an evaluation with the flow
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"id": flow.id,
@@ -272,22 +279,23 @@ def test_flow_eval_works_with_callable(
],
)
# THEN the evaluation finishes successfully
- evaluations_response = humanloop_test_client.evaluations.list(file_id=flow.id)
+ evaluations_response = humanloop_client.evaluations.list(file_id=flow.id)
assert evaluations_response.items and len(evaluations_response.items) == 1
evaluation_id = evaluations_response.items[0].id
- runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
+ runs_response = humanloop_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
assert runs_response.runs[0].status == "completed"
finally:
- humanloop_test_client.flows.delete(id=flow_response.id)
+ humanloop_client.flows.delete(id=flow_response.id)
def test_cannot_evaluate_agent_with_callable(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
eval_dataset: TestIdentifiers,
output_not_null_evaluator: TestIdentifiers,
):
with pytest.raises(ValueError) as e:
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client = get_humanloop_client()
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"path": "Test Agent",
@@ -307,14 +315,15 @@ def test_cannot_evaluate_agent_with_callable(
def test_flow_eval_resolves_to_default_with_callable(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
output_not_null_evaluator: TestIdentifiers,
eval_dataset: TestIdentifiers,
sdk_test_dir: str,
) -> None:
+ humanloop_client = get_humanloop_client()
# GIVEN a flow with some attributes
flow_path = f"{sdk_test_dir}/Test Flow"
- flow_response = humanloop_test_client.flows.upsert(
+ flow_response = humanloop_client.flows.upsert(
path=flow_path,
attributes={
"foo": "bar",
@@ -322,7 +331,7 @@ def test_flow_eval_resolves_to_default_with_callable(
)
try:
# WHEN running an evaluation with the flow's callable but no version
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"id": flow_response.id,
@@ -339,24 +348,25 @@ def test_flow_eval_resolves_to_default_with_callable(
],
)
# THEN the evaluation finishes successfully
- evaluations_response = humanloop_test_client.evaluations.list(file_id=flow_response.id)
+ evaluations_response = humanloop_client.evaluations.list(file_id=flow_response.id)
assert evaluations_response.items and len(evaluations_response.items) == 1
evaluation_id = evaluations_response.items and evaluations_response.items[0].id
- runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id) # type: ignore [attr-defined, arg-type]
+ runs_response = humanloop_client.evaluations.list_runs_for_evaluation(id=evaluation_id) # type: ignore [attr-defined, arg-type]
assert runs_response.runs[0].status == "completed"
finally:
# Clean up test resources
- humanloop_test_client.flows.delete(id=flow_response.id)
+ humanloop_client.flows.delete(id=flow_response.id)
@pytest.mark.skip(reason="Skip until agents are in prod")
def test_agent_eval_works_upserting(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
eval_dataset: TestIdentifiers,
output_not_null_evaluator: TestIdentifiers,
sdk_test_dir: str,
):
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client = get_humanloop_client()
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"path": f"{sdk_test_dir}/Test Agent",
@@ -387,7 +397,7 @@ def test_agent_eval_works_upserting(
}
],
)
- files_response = humanloop_test_client.files.list_files(page=1, size=100)
+ files_response = humanloop_client.files.list_files(page=1, size=100)
eval_agent = None
for file in files_response.records:
if file.path == f"{sdk_test_dir}/Test Agent":
@@ -395,8 +405,8 @@ def test_agent_eval_works_upserting(
break
assert eval_agent and eval_agent.type == "agent"
# THEN the evaluation finishes successfully
- evaluations_response = humanloop_test_client.evaluations.list(file_id=eval_agent.id)
+ evaluations_response = humanloop_client.evaluations.list(file_id=eval_agent.id)
assert evaluations_response.items and len(evaluations_response.items) == 1
evaluation_id = evaluations_response.items[0].id
- runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id) # type: ignore [attr-defined, arg-type]
+ runs_response = humanloop_client.evaluations.list_runs_for_evaluation(id=evaluation_id) # type: ignore [attr-defined, arg-type]
assert runs_response.runs[0].status == "completed"
diff --git a/tests/custom/integration/test_sync.py b/tests/custom/integration/test_sync.py
index b102588c..1a3bb624 100644
--- a/tests/custom/integration/test_sync.py
+++ b/tests/custom/integration/test_sync.py
@@ -6,7 +6,6 @@
from humanloop.agents.client import AgentsClient
from humanloop.error import HumanloopRuntimeError
from tests.custom.types import GetHumanloopClientFn
-import logging
class SyncableFile(NamedTuple):
@@ -223,7 +222,6 @@ def test_overload_log_with_local_files(
def test_overload_version_environment_handling(
- caplog: pytest.LogCaptureFixture,
get_humanloop_client: GetHumanloopClientFn,
test_file_structure: List[SyncableFile],
):
@@ -232,7 +230,7 @@ def test_overload_version_environment_handling(
Flow:
1. Create files in remote (via test_file_structure fixture)
2. Pull files locally
- 3. Test that version_id/environment parameters cause remote usage with warning
+ 3. Test that specifying path + version_id/environment raises HumanloopRuntimeError
"""
# First pull the files locally
humanloop_client = get_humanloop_client(use_local_files=True)
@@ -247,47 +245,49 @@ def test_overload_version_environment_handling(
assert local_path.exists(), f"Expected pulled file at {local_path}"
assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
- # Test with version_id - should use remote with warning
- # Check that the warning was logged
- with caplog.at_level(level=logging.WARNING, logger="humanloop.sdk"):
+ # Test with version_id - should raise HumanloopRuntimeError
+ with pytest.raises(HumanloopRuntimeError, match="Cannot use local file.*version_id or environment was specified"):
if test_file.type == "prompt":
- response = humanloop_client.prompts.call(
+ humanloop_client.prompts.call(
path=test_file.path,
version_id=test_file.version_id,
messages=[{"role": "user", "content": "Testing"}],
)
elif test_file.type == "agent":
- response = humanloop_client.agents.call( # type: ignore [assignment]
+ humanloop_client.agents.call(
path=test_file.path,
version_id=test_file.version_id,
messages=[{"role": "user", "content": "Testing"}],
)
- assert response is not None
- assert any(["Ignoring local file" in record.message for record in caplog.records])
- # Test with environment - should use remote with warning
- if test_file.type == "prompt":
- response = humanloop_client.prompts.call( # type: ignore [assignment]
- path=test_file.path, environment="production", messages=[{"role": "user", "content": "Testing"}]
- )
- elif test_file.type == "agent":
- response = humanloop_client.agents.call( # type: ignore [assignment]
- path=test_file.path, environment="production", messages=[{"role": "user", "content": "Testing"}]
- )
- assert response is not None
+ # Test with environment - should raise HumanloopRuntimeError
+ with pytest.raises(HumanloopRuntimeError, match="Cannot use local file.*version_id or environment was specified"):
+ if test_file.type == "prompt":
+ humanloop_client.prompts.call(
+ path=test_file.path,
+ environment="production",
+ messages=[{"role": "user", "content": "Testing"}],
+ )
+ elif test_file.type == "agent":
+ humanloop_client.agents.call(
+ path=test_file.path,
+ environment="production",
+ messages=[{"role": "user", "content": "Testing"}],
+ )
- if test_file.type == "prompt":
- response = humanloop_client.prompts.call( # type: ignore [assignment]
- path=test_file.path,
- version_id=test_file.version_id,
- environment="staging",
- messages=[{"role": "user", "content": "Testing"}],
- )
- elif test_file.type == "agent":
- response = humanloop_client.agents.call( # type: ignore [assignment]
- path=test_file.path,
- version_id=test_file.version_id,
- environment="staging",
- messages=[{"role": "user", "content": "Testing"}],
- )
- assert response is not None
+ # Test with both version_id and environment - should raise HumanloopRuntimeError
+ with pytest.raises(HumanloopRuntimeError, match="Cannot use local file.*version_id or environment was specified"):
+ if test_file.type == "prompt":
+ humanloop_client.prompts.call(
+ path=test_file.path,
+ version_id=test_file.version_id,
+ environment="staging",
+ messages=[{"role": "user", "content": "Testing"}],
+ )
+ elif test_file.type == "agent":
+ humanloop_client.agents.call(
+ path=test_file.path,
+ version_id=test_file.version_id,
+ environment="staging",
+ messages=[{"role": "user", "content": "Testing"}],
+ )
From e2fc2a3c3245c267999420bd9a70c3e11fae8a6d Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Mon, 12 May 2025 17:11:33 +0100
Subject: [PATCH 22/39] refactor(overload): improve client method overloading
and error handling
---
src/humanloop/client.py | 20 +-
src/humanloop/decorators/flow.py | 2 +-
src/humanloop/overload.py | 375 ++++++++++++--------------
tests/custom/conftest.py | 4 +-
tests/custom/integration/conftest.py | 6 +-
tests/custom/integration/test_sync.py | 45 ++--
6 files changed, 201 insertions(+), 251 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index e9166229..83810d8b 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -18,7 +18,7 @@
)
from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop
-from humanloop.overload import overload_call, overload_log, overload_with_local_files
+from humanloop.overload import overload_client
from humanloop.decorators.flow import flow as flow_decorator_factory
from humanloop.decorators.prompt import prompt_decorator_factory
from humanloop.decorators.tool import tool_decorator_factory as tool_decorator_factory
@@ -150,20 +150,14 @@ def __init__(
# Overload the .log method of the clients to be aware of Evaluation Context
# and the @flow decorator providing the trace_id
- self.prompts = overload_log(client=self.prompts)
- self.prompts = overload_call(client=self.prompts)
- self.prompts = overload_with_local_files( # type: ignore [assignment]
- client=self.prompts,
- sync_client=self._sync_client,
- use_local_files=self.use_local_files,
+ self.prompts = overload_client(
+ client=self.prompts, sync_client=self._sync_client, use_local_files=self.use_local_files
)
- self.agents = overload_with_local_files( # type: ignore [assignment]
- client=self.agents,
- sync_client=self._sync_client,
- use_local_files=self.use_local_files,
+ self.agents = overload_client(
+ client=self.agents, sync_client=self._sync_client, use_local_files=self.use_local_files
)
- self.flows = overload_log(client=self.flows)
- self.tools = overload_log(client=self.tools)
+ self.flows = overload_client(client=self.flows)
+ self.tools = overload_client(client=self.tools)
if opentelemetry_tracer_provider is not None:
self._tracer_provider = opentelemetry_tracer_provider
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index f35a699c..ff7cf115 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -67,7 +67,7 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
"messages": func_args.get("messages"),
"trace_parent_id": trace_id,
}
- this_flow_log: FlowLogResponse = client.flows._log( # type: ignore [attr-defined]
+ this_flow_log: FlowLogResponse = client.flows.log(
path=decorator_context.path,
flow=decorator_context.version,
log_status="incomplete",
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index c2cd9e85..0da4c56a 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -1,23 +1,21 @@
-from collections import defaultdict
import inspect
import logging
import types
-import warnings
-from typing import TypeVar, Union
-from pathlib import Path
+from typing import Any, Dict, Optional, Union, Callable
+
from humanloop.context import (
get_decorator_context,
get_evaluation_context,
get_trace_id,
)
from humanloop.error import HumanloopRuntimeError
-
-from humanloop.evaluators.client import EvaluatorsClient
-from humanloop.flows.client import FlowsClient
+from humanloop.sync.sync_client import SyncClient
from humanloop.prompts.client import PromptsClient
+from humanloop.flows.client import FlowsClient
+from humanloop.datasets.client import DatasetsClient
from humanloop.agents.client import AgentsClient
from humanloop.tools.client import ToolsClient
-from humanloop.sync.sync_client import SyncClient
+from humanloop.evaluators.client import EvaluatorsClient
from humanloop.types import FileType
from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse
from humanloop.types.create_flow_log_response import CreateFlowLogResponse
@@ -27,33 +25,40 @@
logger = logging.getLogger("humanloop.sdk")
+ResponseType = Union[
+ CreatePromptLogResponse,
+ CreateToolLogResponse,
+ CreateFlowLogResponse,
+ CreateEvaluatorLogResponse,
+ PromptCallResponse,
+]
+
-CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, FlowsClient, EvaluatorsClient, ToolsClient)
-
-
-def overload_log(client: CLIENT_TYPE) -> CLIENT_TYPE:
- """
- Wrap the `log` method of the provided Humanloop client to use EVALUATION_CONTEXT.
-
- This makes the overloaded log actions be aware of whether the created Log is
- part of an Evaluation (e.g. one started by eval_utils.run_eval).
- """
- # Copy the original log method in a hidden attribute
- client._log = client.log # type: ignore [attr-defined]
-
- def _overload_log(
- # It's safe to only consider kwargs since the original
- # log method bans positional arguments
- self,
- **kwargs,
- ) -> Union[
- CreatePromptLogResponse,
- CreateToolLogResponse,
- CreateFlowLogResponse,
- CreateEvaluatorLogResponse,
- ]:
- trace_id = get_trace_id()
- if trace_id is not None and type(client) is FlowsClient:
+def _get_file_type_from_client(
+ client: Union[PromptsClient, AgentsClient, ToolsClient, FlowsClient, DatasetsClient, EvaluatorsClient],
+) -> FileType:
+ """Get the file type based on the client type."""
+ if isinstance(client, PromptsClient):
+ return "prompt"
+ elif isinstance(client, AgentsClient):
+ return "agent"
+ elif isinstance(client, ToolsClient):
+ return "tool"
+ elif isinstance(client, FlowsClient):
+ return "flow"
+ elif isinstance(client, DatasetsClient):
+ return "dataset"
+ elif isinstance(client, EvaluatorsClient):
+ return "evaluator"
+
+ raise ValueError(f"Unsupported client type: {type(client)}")
+
+
+def _handle_tracing_context(kwargs: Dict[str, Any], client: Any) -> Dict[str, Any]:
+ """Handle tracing context for both log and call methods."""
+ trace_id = get_trace_id()
+ if trace_id is not None:
+ if "flow" in str(type(client).__name__).lower():
context = get_decorator_context()
if context is None:
raise HumanloopRuntimeError("Internal error: trace_id context is set outside a decorator context.")
@@ -61,189 +66,143 @@ def _overload_log(
f"Using `flows.log()` is not allowed: Flow decorator "
f"for File {context.path} manages the tracing and trace completion."
)
- if trace_id is not None:
- if "trace_parent_id" in kwargs:
- logger.warning(
- "Ignoring trace_parent_id argument at line %d: the Flow decorator manages tracing.",
- inspect.currentframe().f_lineno, # type: ignore [union-attr]
- )
- kwargs = {
- **kwargs,
- "trace_parent_id": trace_id,
- }
- evaluation_context = get_evaluation_context()
- if evaluation_context is not None:
- kwargs_eval, eval_callback = evaluation_context.log_args_with_context(
- path=kwargs.get("path"), log_args=kwargs
- )
- try:
- response = self._log(**kwargs_eval)
- except Exception as e:
- # Re-raising as HumanloopRuntimeError so the decorators don't catch it
- raise HumanloopRuntimeError from e
- if eval_callback is not None:
- eval_callback(response.id)
- else:
- try:
- response = self._log(**kwargs)
- except Exception as e:
- # Re-raising as HumanloopRuntimeError so the decorators don't catch it
- raise HumanloopRuntimeError from e
-
- return response
- # Replace the original log method with the overloaded one
- client.log = types.MethodType(_overload_log, client) # type: ignore [assignment]
- # Return the client with the overloaded log method
- logger.debug("Overloaded the .call method of %s", client)
- return client
+ if "trace_parent_id" in kwargs:
+ logger.warning(
+ "Ignoring trace_parent_id argument at line %d: the Flow decorator manages tracing.",
+ inspect.currentframe().f_lineno, # type: ignore [union-attr]
+ )
+ kwargs = {
+ **kwargs,
+ "trace_parent_id": trace_id,
+ }
+ return kwargs
-def overload_call(client: PromptsClient) -> PromptsClient:
- if not hasattr(client, "_overloads"):
- client._overloads = defaultdict(list) # type: ignore [attr-defined]
- if len(client._overloads["call"]) == 0: # type: ignore [attr-defined]
- client._overloads["call"].append(client.call) # type: ignore [attr-defined]
-
- def _overload_call(self, **kwargs) -> PromptCallResponse:
- # trace_id is None if logging outside a decorator
- trace_id = get_trace_id()
- if trace_id is not None:
- if "trace_parent_id" in kwargs:
- logger.warning(
- "Ignoring trace_parent_id argument at line %d: the Flow decorator manages tracing.",
- inspect.currentframe().f_lineno, # type: ignore [union-attr]
- )
- kwargs = {
- **kwargs,
- "trace_parent_id": trace_id,
- }
-
- try:
- response = client._overloads["call"][0](**kwargs) # type: ignore [attr-defined]
- except Exception as e:
- # Re-raising as HumanloopRuntimeError so the decorators don't catch it
- raise HumanloopRuntimeError from e
+def _handle_local_files(
+ kwargs: Dict[str, Any],
+ client: Any,
+ sync_client: Optional[SyncClient],
+ use_local_files: bool,
+) -> Dict[str, Any]:
+ """Handle local file loading if enabled."""
+ if not use_local_files or "path" not in kwargs or sync_client is None:
+ return kwargs
- return response
+ if "id" in kwargs:
+ raise HumanloopRuntimeError("Can only specify one of `id` or `path`")
- client.call = types.MethodType(_overload_call, client) # type: ignore [assignment]
- return client
+ # Check if version_id or environment is specified
+ use_remote = any(["version_id" in kwargs, "environment" in kwargs])
+ normalized_path = sync_client._normalize_path(kwargs["path"])
+ if use_remote:
+ raise HumanloopRuntimeError(
+ f"Cannot use local file for `{normalized_path}` as version_id or environment was specified. "
+ "Please either remove version_id/environment to use local files, or set use_local_files=False to use remote files."
+ )
-def _get_file_type_from_client(client: Union[PromptsClient, AgentsClient]) -> FileType:
- """Get the file type based on the client type."""
- if isinstance(client, PromptsClient):
- return "prompt"
- elif isinstance(client, AgentsClient):
- return "agent"
- else:
- raise ValueError(f"Unsupported client type: {type(client)}")
+ file_type = _get_file_type_from_client(client)
+ # If file_type is already specified in kwargs, it means user provided a PromptKernelRequestParams object
+ if file_type in kwargs and not isinstance(kwargs[file_type], str):
+ logger.warning(
+ f"Ignoring local file for `{normalized_path}` as {file_type} parameters were directly provided. "
+ "Using provided parameters instead."
+ )
+ return kwargs
+
+ try:
+ file_content = sync_client.get_file_content(normalized_path, file_type)
+ kwargs[file_type] = file_content
+ except HumanloopRuntimeError as e:
+ raise HumanloopRuntimeError(f"Failed to use local file for `{normalized_path}`: {str(e)}")
+
+ return kwargs
+
+
+def _handle_evaluation_context(kwargs: Dict[str, Any]) -> tuple[Dict[str, Any], Optional[Callable[[str], None]]]:
+ """Handle evaluation context for logging."""
+ evaluation_context = get_evaluation_context()
+ if evaluation_context is not None:
+ return evaluation_context.log_args_with_context(path=kwargs.get("path"), log_args=kwargs)
+ return kwargs, None
+
+
+def _overload_log(self: Any, sync_client: Optional[SyncClient], use_local_files: bool, **kwargs) -> ResponseType:
+ try:
+ # Special handling for flows - prevent direct log usage
+ if type(self) is FlowsClient and get_trace_id() is not None:
+ context = get_decorator_context()
+ if context is None:
+ raise HumanloopRuntimeError("Internal error: trace_id context is set outside a decorator context.")
+ raise HumanloopRuntimeError(
+ f"Using `flows.log()` is not allowed: Flow decorator "
+ f"for File {context.path} manages the tracing and trace completion."
+ )
+ kwargs = _handle_tracing_context(kwargs, self)
-def overload_with_local_files(
- client: Union[PromptsClient, AgentsClient],
- sync_client: SyncClient,
- use_local_files: bool,
-) -> Union[PromptsClient, AgentsClient]:
- """Overload call and log methods to handle local files when use_local_files is True.
-
- When use_local_files is True, the following prioritization strategy is used:
- 1. Direct Parameters: If {file_type} parameters are provided directly (as a PromptKernelRequestParams or AgentKernelRequestParams object),
- these take precedence and the local file is ignored.
- 2. Version/Environment: If version_id or environment is specified, the remote version is used instead
- of the local file.
- 3. Local File: If neither of the above are specified, attempts to use the local file at the given path.
-
- For example, with a prompt client:
- - If prompt={model: "gpt-4", ...} is provided, uses those parameters directly
- - If version_id="123" is provided, uses that remote version
- - Otherwise, tries to load from the local file at the given path
-
- Args:
- client: The client to overload (PromptsClient or AgentsClient)
- sync_client: The sync client used for file operations
- use_local_files: Whether to enable local file handling
-
- Returns:
- The client with overloaded methods
-
- Raises:
- HumanloopRuntimeError: If use_local_files is True and local file cannot be accessed
- """
- if not hasattr(client, "_overloads"):
- client._overloads = defaultdict(list) # type: ignore [union-attr]
- # If the method has been overloaded, don't re-add the method
- if isinstance(client, PromptsClient):
- if len(client._overloads["call"]) == 1: # type: ignore [attr-defined]
- client._overloads["call"].append(client.call) # type: ignore [attr-defined]
- else:
- raise RuntimeError(f"Unexpected overload order of operations for {client}.call")
- elif isinstance(client, AgentsClient):
- if len(client._overloads["call"]) == 0:
- client._overloads["call"].append(client.call)
- else:
- raise RuntimeError(f"Unexpected overload order of operations for {client}.call")
- else:
- raise NotImplementedError(f"Unsupported client type: {type(client)}")
- if len(client._overloads["log"]) == 0:
- client._overloads["log"].append(client.log)
- else:
- raise RuntimeError(f"Unexpected overload order of operations for {client}.log")
+ # Handle local files for Prompts and Agents clients
+ if _get_file_type_from_client(self) in ["prompt", "agent"]:
+ if sync_client is None:
+ logger.error("sync_client is None but client has log method and use_local_files=%s", use_local_files)
+ raise HumanloopRuntimeError("sync_client is required for clients that support local file operations")
+ kwargs = _handle_local_files(kwargs, self, sync_client, use_local_files)
- file_type = _get_file_type_from_client(client)
+ kwargs, eval_callback = _handle_evaluation_context(kwargs)
+ response = self._log(**kwargs) # Use stored original method
+ if eval_callback is not None:
+ eval_callback(response.id)
+ return response
+ except HumanloopRuntimeError:
+ # Re-raise HumanloopRuntimeError without wrapping to preserve the message
+ raise
+ except Exception as e:
+ # Only wrap non-HumanloopRuntimeError exceptions
+ raise HumanloopRuntimeError from e
+
+
+def _overload_call(self: Any, sync_client: Optional[SyncClient], use_local_files: bool, **kwargs) -> PromptCallResponse:
+ try:
+ kwargs = _handle_tracing_context(kwargs, self)
+ kwargs = _handle_local_files(kwargs, self, sync_client, use_local_files)
+ return self._call(**kwargs) # Use stored original method
+ except HumanloopRuntimeError:
+ # Re-raise HumanloopRuntimeError without wrapping to preserve the message
+ raise
+ except Exception as e:
+ # Only wrap non-HumanloopRuntimeError exceptions
+ raise HumanloopRuntimeError from e
+
+
+def overload_client(
+ client: Any,
+ sync_client: Optional[SyncClient] = None,
+ use_local_files: bool = False,
+) -> Any:
+ """Overloads client methods to add tracing, local file handling, and evaluation context."""
+ # Store original log method as _log for all clients
+ if hasattr(client, "log") and not hasattr(client, "_log"):
+ client._log = client.log # type: ignore [attr-defined]
+
+ # Create a closure to capture sync_client and use_local_files
+ def log_wrapper(self: Any, **kwargs) -> ResponseType:
+ return _overload_log(self, sync_client, use_local_files, **kwargs)
+
+ client.log = types.MethodType(log_wrapper, client)
+
+ # Overload call method for Prompt and Agent clients
+ if _get_file_type_from_client(client) in ["prompt", "agent"]:
+ if sync_client is None and use_local_files:
+ logger.error("sync_client is None but client has call method and use_local_files=%s", use_local_files)
+ raise HumanloopRuntimeError("sync_client is required for clients that support call operations")
+ if hasattr(client, "call") and not hasattr(client, "_call"):
+ client._call = client.call # type: ignore [attr-defined]
+
+ # Create a closure to capture sync_client and use_local_files
+ def call_wrapper(self: Any, **kwargs) -> PromptCallResponse:
+ return _overload_call(self, sync_client, use_local_files, **kwargs)
+
+ client.call = types.MethodType(call_wrapper, client)
- def _overload(self, function_name: str, **kwargs) -> PromptCallResponse:
- if "id" in kwargs and "path" in kwargs:
- raise HumanloopRuntimeError(
- "Can only specify one of `id` or `path` when "
- f"{'logging' if function_name == 'log' else 'calling'} a {file_type}"
- )
- # Handle local files if enabled
- if use_local_files and "path" in kwargs:
- # Check if version_id or environment is specified
- use_remote = any(["version_id" in kwargs, "environment" in kwargs])
- normalized_path = sync_client._normalize_path(kwargs["path"])
-
- if use_remote:
- # Don't allow user to specify path + version_id/environment because it's ambiguous
- raise HumanloopRuntimeError(
- f"Cannot use local file for `{normalized_path}` as version_id or environment was specified. "
- "Please either remove version_id/environment to use local files, or set use_local_files=False to use remote files."
- )
- else:
- # Only use local file if no version info is specified
- try:
- # If file_type is already specified in kwargs, it means user provided a PromptKernelRequestParams object
- if file_type in kwargs and not isinstance(kwargs[file_type], str):
- logger.warning(
- f"Ignoring local file for `{normalized_path}` as {file_type} parameters were directly provided. "
- "Using provided parameters instead."
- )
- else:
- file_content = sync_client.get_file_content(normalized_path, file_type)
- kwargs[file_type] = file_content
- except HumanloopRuntimeError as e:
- # Re-raise with more context
- raise HumanloopRuntimeError(f"Failed to use local file for `{normalized_path}`: {str(e)}")
-
- try:
- if function_name == "call":
- return client._overloads["call"][1](**kwargs) # type: ignore [attr-defined, union-attr]
- elif function_name == "log":
- return client._overloads["log"][0](**kwargs) # type: ignore [attr-defined, union-attr]
- else:
- raise ValueError(f"Unsupported function name: {function_name}")
- except Exception as e:
- # Re-raising as HumanloopRuntimeError so the decorators don't catch it
- raise HumanloopRuntimeError from e
-
- def _overload_call(self, **kwargs) -> PromptCallResponse:
- return _overload(self, "call", **kwargs)
-
- def _overload_log(self, **kwargs) -> PromptCallResponse:
- return _overload(self, "log", **kwargs)
-
- client.call = types.MethodType(_overload_call, client) # type: ignore [assignment]
- client.log = types.MethodType(_overload_log, client) # type: ignore [assignment]
return client
diff --git a/tests/custom/conftest.py b/tests/custom/conftest.py
index b4387550..6d877721 100644
--- a/tests/custom/conftest.py
+++ b/tests/custom/conftest.py
@@ -85,14 +85,14 @@ def get_humanloop_client() -> GetHumanloopClientFn:
if not os.getenv("HUMANLOOP_API_KEY"):
pytest.fail("HUMANLOOP_API_KEY is not set for integration tests")
- def _get_humanloop_test_client(use_local_files: bool = False) -> Humanloop:
+ def _get_humanloop_client(use_local_files: bool = False) -> Humanloop:
return Humanloop(
api_key=os.getenv("HUMANLOOP_API_KEY"),
base_url="http://localhost:80/v5",
use_local_files=use_local_files,
)
- return _get_humanloop_test_client
+ return _get_humanloop_client
@pytest.fixture(scope="function")
diff --git a/tests/custom/integration/conftest.py b/tests/custom/integration/conftest.py
index 3ad0364f..8b226b17 100644
--- a/tests/custom/integration/conftest.py
+++ b/tests/custom/integration/conftest.py
@@ -46,16 +46,18 @@ def cleanup_directory(directory_id: str):
cleanup_directory(subdirectory.id)
for file in directory_response.files:
match file.type:
- case "prompt":
- humanloop_client.prompts.delete(id=file.id)
case "agent":
humanloop_client.agents.delete(id=file.id)
+ case "prompt":
+ humanloop_client.prompts.delete(id=file.id)
case "dataset":
humanloop_client.datasets.delete(id=file.id)
case "evaluator":
humanloop_client.evaluators.delete(id=file.id)
case "flow":
humanloop_client.flows.delete(id=file.id)
+ case "tool":
+ humanloop_client.tools.delete(id=file.id)
case _:
raise ValueError(f"Unknown file type: {file.type}")
humanloop_client.directories.delete(id=directory_response.id)
diff --git a/tests/custom/integration/test_sync.py b/tests/custom/integration/test_sync.py
index 1a3bb624..ab77963d 100644
--- a/tests/custom/integration/test_sync.py
+++ b/tests/custom/integration/test_sync.py
@@ -1,7 +1,7 @@
from typing import Generator, List, NamedTuple, Union
from pathlib import Path
import pytest
-from humanloop import Humanloop, FileType, AgentResponse, PromptResponse
+from humanloop import FileType, AgentResponse, PromptResponse
from humanloop.prompts.client import PromptsClient
from humanloop.agents.client import AgentsClient
from humanloop.error import HumanloopRuntimeError
@@ -20,8 +20,13 @@ class SyncableFile(NamedTuple):
def test_file_structure(
get_humanloop_client: GetHumanloopClientFn,
sdk_test_dir: str,
+ request, # This gives us access to the test function's parameters
) -> Generator[list[SyncableFile], None, None]:
- """Creates a predefined structure of files in Humanloop for testing sync"""
+ """Creates a predefined structure of files in Humanloop for testing sync.
+
+ The fixture will use use_local_files=True if the test function requests it,
+ otherwise it will use use_local_files=False.
+ """
files: List[SyncableFile] = [
SyncableFile(
path="prompts/gpt-4",
@@ -50,8 +55,7 @@ def test_file_structure(
),
]
- humanloop_client: Humanloop = get_humanloop_client()
-
+ humanloop_client = get_humanloop_client()
# Create the files in Humanloop
created_files = []
for file in files:
@@ -73,6 +77,8 @@ def test_file_structure(
)
)
+ humanloop_client.pull()
+
yield created_files
@@ -88,9 +94,14 @@ def cleanup_local_files():
shutil.rmtree(local_dir)
+def test_client(get_humanloop_client: GetHumanloopClientFn):
+ client = get_humanloop_client(use_local_files=True)
+ assert client.use_local_files
+
+
def test_pull_basic(
- get_humanloop_client: GetHumanloopClientFn,
test_file_structure: List[SyncableFile],
+ get_humanloop_client: GetHumanloopClientFn,
):
"""Test that humanloop.sync() correctly syncs remote files to local filesystem"""
# Run the sync
@@ -118,14 +129,8 @@ def test_overload_with_local_files(
get_humanloop_client: GetHumanloopClientFn,
test_file_structure: List[SyncableFile],
):
- """Test that overload_with_local_files correctly handles local files.
-
- Flow:
- 1. Create files in remote (via test_file_structure fixture)
- 2. Pull files locally
- 3. Test using the pulled files
- """
- # First pull the files locally
+ """Test that overload_with_local_files correctly handles local files."""
+ # The test_file_structure fixture will automatically use use_local_files=True
humanloop_client = get_humanloop_client(use_local_files=True)
humanloop_client.pull()
@@ -169,18 +174,8 @@ def test_overload_log_with_local_files(
test_file_structure: List[SyncableFile],
sdk_test_dir: str,
):
- """Test that overload_with_local_files correctly handles local files for log operations.
-
- Flow:
- 1. Create files in remote (via test_file_structure fixture)
- 2. Pull files locally
- 3. Test logging using the pulled files
-
- :param humanloop_test_client: The Humanloop client with local files enabled
- :param test_file_structure: List of test files created in remote
- :param cleanup_local_files: Fixture to clean up local files after test
- """
- # First pull the files locally
+ """Test that overload_with_local_files correctly handles local files for log operations."""
+ # The test_file_structure fixture will automatically use use_local_files=True
humanloop_client = get_humanloop_client(use_local_files=True)
humanloop_client.pull()
From 0e520ab517173469af780bfb45e4c01aa4328551 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Mon, 12 May 2025 17:26:12 +0100
Subject: [PATCH 23/39] fix: use original private stored log method in flow
decorator
---
src/humanloop/decorators/flow.py | 2 +-
src/humanloop/overload.py | 2 +-
tests/custom/sync/test_client.py | 5 +++--
3 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/src/humanloop/decorators/flow.py b/src/humanloop/decorators/flow.py
index ff7cf115..f35a699c 100644
--- a/src/humanloop/decorators/flow.py
+++ b/src/humanloop/decorators/flow.py
@@ -67,7 +67,7 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> Optional[R]:
"messages": func_args.get("messages"),
"trace_parent_id": trace_id,
}
- this_flow_log: FlowLogResponse = client.flows.log(
+ this_flow_log: FlowLogResponse = client.flows._log( # type: ignore [attr-defined]
path=decorator_context.path,
flow=decorator_context.version,
log_status="incomplete",
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index 0da4c56a..f66f6cdf 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -181,7 +181,7 @@ def overload_client(
use_local_files: bool = False,
) -> Any:
"""Overloads client methods to add tracing, local file handling, and evaluation context."""
- # Store original log method as _log for all clients
+ # Store original log method as _log for all clients. Used in flow decorator
if hasattr(client, "log") and not hasattr(client, "_log"):
client._log = client.log # type: ignore [attr-defined]
diff --git a/tests/custom/sync/test_client.py b/tests/custom/sync/test_client.py
index 2721ec9f..bb3d2cd0 100644
--- a/tests/custom/sync/test_client.py
+++ b/tests/custom/sync/test_client.py
@@ -4,6 +4,7 @@
from unittest.mock import Mock, patch
from humanloop.sync.sync_client import SyncClient
from humanloop.error import HumanloopRuntimeError
+from typing import Literal
@pytest.fixture
@@ -87,8 +88,8 @@ def test_cache_functionality(sync_client: SyncClient):
# Save a test file
content = "test content"
path = "test/path"
- file_type = "prompt"
- sync_client._save_serialized_file(content, path, file_type) # type: ignore [arg-type] Ignore because we're deliberately testing an invalid literal
+ file_type: Literal["prompt", "agent"] = "prompt"
+ sync_client._save_serialized_file(content, path, file_type)
# First read should hit disk
sync_client.get_file_content(path, file_type)
From 55023c3dc16668c60f6bfe65f24da7c3de01d7d5 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Mon, 12 May 2025 17:27:01 +0100
Subject: [PATCH 24/39] Rename TestIdentifiers to ResourceIdentifiers to
silence warning
---
tests/custom/integration/conftest.py | 20 +++++-----
tests/custom/integration/test_evals.py | 52 +++++++++++++-------------
2 files changed, 37 insertions(+), 35 deletions(-)
diff --git a/tests/custom/integration/conftest.py b/tests/custom/integration/conftest.py
index 8b226b17..1c443471 100644
--- a/tests/custom/integration/conftest.py
+++ b/tests/custom/integration/conftest.py
@@ -8,12 +8,11 @@
import uuid
import pytest
import dotenv
-from humanloop.client import Humanloop
from tests.custom.types import GetHumanloopClientFn
@dataclass
-class TestIdentifiers:
+class ResourceIdentifiers:
file_id: str
file_path: str
@@ -40,6 +39,7 @@ def openai_key() -> str:
@pytest.fixture(scope="function")
def sdk_test_dir(get_humanloop_client: GetHumanloopClientFn) -> Generator[str, None, None]:
humanloop_client = get_humanloop_client()
+
def cleanup_directory(directory_id: str):
directory_response = humanloop_client.directories.get(id=directory_id)
for subdirectory in directory_response.subdirectories:
@@ -95,7 +95,9 @@ def test_prompt_config() -> dict[str, Any]:
@pytest.fixture(scope="function")
-def eval_dataset(get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str) -> Generator[TestIdentifiers, None, None]:
+def eval_dataset(
+ get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str
+) -> Generator[ResourceIdentifiers, None, None]:
humanloop_client = get_humanloop_client()
dataset_path = f"{sdk_test_dir}/eval_dataset"
try:
@@ -119,7 +121,7 @@ def eval_dataset(get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str)
},
],
)
- yield TestIdentifiers(file_id=response.id, file_path=response.path)
+ yield ResourceIdentifiers(file_id=response.id, file_path=response.path)
humanloop_client.datasets.delete(id=response.id)
except Exception as e:
pytest.fail(f"Failed to create dataset {dataset_path}: {e}")
@@ -128,7 +130,7 @@ def eval_dataset(get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str)
@pytest.fixture(scope="function")
def eval_prompt(
get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str, openai_key: str, test_prompt_config: dict[str, Any]
-) -> Generator[TestIdentifiers, None, None]:
+) -> Generator[ResourceIdentifiers, None, None]:
humanloop_client = get_humanloop_client()
prompt_path = f"{sdk_test_dir}/eval_prompt"
try:
@@ -136,7 +138,7 @@ def eval_prompt(
path=prompt_path,
**test_prompt_config,
)
- yield TestIdentifiers(file_id=response.id, file_path=response.path)
+ yield ResourceIdentifiers(file_id=response.id, file_path=response.path)
humanloop_client.prompts.delete(id=response.id)
except Exception as e:
pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
@@ -145,7 +147,7 @@ def eval_prompt(
@pytest.fixture(scope="function")
def output_not_null_evaluator(
get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str
-) -> Generator[TestIdentifiers, None, None]:
+) -> Generator[ResourceIdentifiers, None, None]:
humanloop_client = get_humanloop_client()
evaluator_path = f"{sdk_test_dir}/output_not_null_evaluator"
try:
@@ -161,14 +163,14 @@ def output_not_null(log: dict) -> bool:
"evaluator_type": "python",
},
)
- yield TestIdentifiers(file_id=response.id, file_path=response.path)
+ yield ResourceIdentifiers(file_id=response.id, file_path=response.path)
humanloop_client.evaluators.delete(id=response.id)
except Exception as e:
pytest.fail(f"Failed to create evaluator {evaluator_path}: {e}")
@pytest.fixture(scope="function")
-def id_for_staging_environment(get_humanloop_client: GetHumanloopClientFn, eval_prompt: TestIdentifiers) -> str:
+def id_for_staging_environment(get_humanloop_client: GetHumanloopClientFn, eval_prompt: ResourceIdentifiers) -> str:
humanloop_client = get_humanloop_client()
response = humanloop_client.prompts.list_environments(id=eval_prompt.file_id)
for environment in response:
diff --git a/tests/custom/integration/test_evals.py b/tests/custom/integration/test_evals.py
index 3c615466..625e31bc 100644
--- a/tests/custom/integration/test_evals.py
+++ b/tests/custom/integration/test_evals.py
@@ -3,15 +3,15 @@
import pytest
from humanloop.error import HumanloopRuntimeError
-from tests.custom.integration.conftest import TestIdentifiers
+from tests.custom.integration.conftest import ResourceIdentifiers
from tests.custom.types import GetHumanloopClientFn
def test_eval_run_works_on_online_files(
get_humanloop_client: GetHumanloopClientFn,
- output_not_null_evaluator: TestIdentifiers,
- eval_dataset: TestIdentifiers,
- eval_prompt: TestIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
+ eval_dataset: ResourceIdentifiers,
+ eval_prompt: ResourceIdentifiers,
) -> None:
humanloop_client = get_humanloop_client()
humanloop_client.evaluations.run( # type: ignore [attr-defined]
@@ -39,9 +39,9 @@ def test_eval_run_works_on_online_files(
def test_eval_run_version_id(
get_humanloop_client: GetHumanloopClientFn,
- output_not_null_evaluator: TestIdentifiers,
- eval_dataset: TestIdentifiers,
- eval_prompt: TestIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
+ eval_dataset: ResourceIdentifiers,
+ eval_prompt: ResourceIdentifiers,
test_prompt_config: dict[str, Any],
) -> None:
humanloop_client = get_humanloop_client()
@@ -88,9 +88,9 @@ def test_eval_run_version_id(
def test_eval_run_environment(
get_humanloop_client: GetHumanloopClientFn,
- output_not_null_evaluator: TestIdentifiers,
- eval_dataset: TestIdentifiers,
- eval_prompt: TestIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
+ eval_dataset: ResourceIdentifiers,
+ eval_prompt: ResourceIdentifiers,
test_prompt_config: dict[str, Any],
id_for_staging_environment: str,
) -> None:
@@ -141,9 +141,9 @@ def test_eval_run_environment(
@pytest.mark.parametrize("version_lookup", ["version_id", "environment"])
def test_eval_run_version_lookup_fails_with_path(
get_humanloop_client: GetHumanloopClientFn,
- eval_prompt: TestIdentifiers,
- eval_dataset: TestIdentifiers,
- output_not_null_evaluator: TestIdentifiers,
+ eval_prompt: ResourceIdentifiers,
+ eval_dataset: ResourceIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
version_lookup: str,
):
# GIVEN an eval run where we try to evaluate a non-default version
@@ -172,9 +172,9 @@ def test_eval_run_version_lookup_fails_with_path(
def test_eval_run_with_version_upsert(
get_humanloop_client: GetHumanloopClientFn,
- eval_prompt: TestIdentifiers,
- eval_dataset: TestIdentifiers,
- output_not_null_evaluator: TestIdentifiers,
+ eval_prompt: ResourceIdentifiers,
+ eval_dataset: ResourceIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
test_prompt_config: dict[str, Any],
):
humanloop_client = get_humanloop_client()
@@ -210,8 +210,8 @@ def test_eval_run_with_version_upsert(
def test_flow_eval_does_not_work_without_callable(
get_humanloop_client: GetHumanloopClientFn,
- eval_dataset: TestIdentifiers,
- output_not_null_evaluator: TestIdentifiers,
+ eval_dataset: ResourceIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
):
with pytest.raises(HumanloopRuntimeError) as e:
humanloop_client = get_humanloop_client()
@@ -241,8 +241,8 @@ def test_flow_eval_does_not_work_without_callable(
def test_flow_eval_works_with_callable(
get_humanloop_client: GetHumanloopClientFn,
- eval_dataset: TestIdentifiers,
- output_not_null_evaluator: TestIdentifiers,
+ eval_dataset: ResourceIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
sdk_test_dir: str,
):
humanloop_client = get_humanloop_client()
@@ -290,8 +290,8 @@ def test_flow_eval_works_with_callable(
def test_cannot_evaluate_agent_with_callable(
get_humanloop_client: GetHumanloopClientFn,
- eval_dataset: TestIdentifiers,
- output_not_null_evaluator: TestIdentifiers,
+ eval_dataset: ResourceIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
):
with pytest.raises(ValueError) as e:
humanloop_client = get_humanloop_client()
@@ -316,8 +316,8 @@ def test_cannot_evaluate_agent_with_callable(
def test_flow_eval_resolves_to_default_with_callable(
get_humanloop_client: GetHumanloopClientFn,
- output_not_null_evaluator: TestIdentifiers,
- eval_dataset: TestIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
+ eval_dataset: ResourceIdentifiers,
sdk_test_dir: str,
) -> None:
humanloop_client = get_humanloop_client()
@@ -361,8 +361,8 @@ def test_flow_eval_resolves_to_default_with_callable(
@pytest.mark.skip(reason="Skip until agents are in prod")
def test_agent_eval_works_upserting(
get_humanloop_client: GetHumanloopClientFn,
- eval_dataset: TestIdentifiers,
- output_not_null_evaluator: TestIdentifiers,
+ eval_dataset: ResourceIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
sdk_test_dir: str,
):
humanloop_client = get_humanloop_client()
From 4e344d120231ffb94ed8712a256c0132bf646a10 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Mon, 12 May 2025 17:32:09 +0100
Subject: [PATCH 25/39] test: enable agent evaluation test as agents are now in
prod
---
tests/custom/integration/test_evals.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/tests/custom/integration/test_evals.py b/tests/custom/integration/test_evals.py
index 625e31bc..2ec74d93 100644
--- a/tests/custom/integration/test_evals.py
+++ b/tests/custom/integration/test_evals.py
@@ -358,7 +358,6 @@ def test_flow_eval_resolves_to_default_with_callable(
humanloop_client.flows.delete(id=flow_response.id)
-@pytest.mark.skip(reason="Skip until agents are in prod")
def test_agent_eval_works_upserting(
get_humanloop_client: GetHumanloopClientFn,
eval_dataset: ResourceIdentifiers,
From 70ba9c9e2800526736cfe003960c8cd9e505a358 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Mon, 12 May 2025 17:40:16 +0100
Subject: [PATCH 26/39] test: improve test_sync.py and sync/test_client.py
readability and clarity
---
tests/custom/integration/test_sync.py | 98 ++++++++++++---------------
tests/custom/sync/test_client.py | 45 +++++++++---
2 files changed, 76 insertions(+), 67 deletions(-)
diff --git a/tests/custom/integration/test_sync.py b/tests/custom/integration/test_sync.py
index ab77963d..0423d384 100644
--- a/tests/custom/integration/test_sync.py
+++ b/tests/custom/integration/test_sync.py
@@ -17,16 +17,11 @@ class SyncableFile(NamedTuple):
@pytest.fixture
-def test_file_structure(
+def syncable_files_fixture(
get_humanloop_client: GetHumanloopClientFn,
sdk_test_dir: str,
- request, # This gives us access to the test function's parameters
) -> Generator[list[SyncableFile], None, None]:
- """Creates a predefined structure of files in Humanloop for testing sync.
-
- The fixture will use use_local_files=True if the test function requests it,
- otherwise it will use use_local_files=False.
- """
+ """Creates a predefined structure of files in Humanloop for testing sync."""
files: List[SyncableFile] = [
SyncableFile(
path="prompts/gpt-4",
@@ -56,7 +51,6 @@ def test_file_structure(
]
humanloop_client = get_humanloop_client()
- # Create the files in Humanloop
created_files = []
for file in files:
full_path = f"{sdk_test_dir}/{file.path}"
@@ -86,7 +80,6 @@ def test_file_structure(
def cleanup_local_files():
"""Cleanup any locally synced files after tests"""
yield
- # Clean up the local humanloop directory after tests
local_dir = Path("humanloop")
if local_dir.exists():
import shutil
@@ -94,69 +87,64 @@ def cleanup_local_files():
shutil.rmtree(local_dir)
-def test_client(get_humanloop_client: GetHumanloopClientFn):
- client = get_humanloop_client(use_local_files=True)
- assert client.use_local_files
-
-
def test_pull_basic(
- test_file_structure: List[SyncableFile],
+ syncable_files_fixture: List[SyncableFile],
get_humanloop_client: GetHumanloopClientFn,
):
"""Test that humanloop.sync() correctly syncs remote files to local filesystem"""
- # Run the sync
+ # GIVEN a set of files in the remote system (from syncable_files_fixture)
humanloop_client = get_humanloop_client()
+
+ # WHEN running the sync
successful_files = humanloop_client.pull()
- # Verify each file was synced correctly
- for file in test_file_structure:
- # Get the extension based on file type: .prompt, .agent
+ # THEN our local filesystem should mirror the remote filesystem in the HL Workspace
+ for file in syncable_files_fixture:
extension = f".{file.type}"
-
- # The local path should mirror the remote path structure
local_path = Path("humanloop") / f"{file.path}{extension}"
- # Basic assertions
+ # THEN the file and its directory should exist
assert local_path.exists(), f"Expected synced file at {local_path}"
assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
- # Verify it's not empty
+ # THEN the file should not be empty
content = local_path.read_text()
assert content, f"File at {local_path} should not be empty"
def test_overload_with_local_files(
get_humanloop_client: GetHumanloopClientFn,
- test_file_structure: List[SyncableFile],
+ syncable_files_fixture: List[SyncableFile],
):
"""Test that overload_with_local_files correctly handles local files."""
- # The test_file_structure fixture will automatically use use_local_files=True
+ # GIVEN a client with use_local_files=True and pulled files
humanloop_client = get_humanloop_client(use_local_files=True)
humanloop_client.pull()
- # Test using the pulled files
- test_file = test_file_structure[0] # Use the first test file
+ # GIVEN a test file from the structure
+ test_file = syncable_files_fixture[0]
extension = f".{test_file.type}"
local_path = Path("humanloop") / f"{test_file.path}{extension}"
- # Verify the file was pulled correctly
+ # THEN the file should exist locally
assert local_path.exists(), f"Expected pulled file at {local_path}"
assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
- # Test call with pulled file
+ # WHEN calling the file
response: Union[AgentResponse, PromptResponse]
if test_file.type == "prompt":
response = humanloop_client.prompts.call( # type: ignore [assignment]
path=test_file.path, messages=[{"role": "user", "content": "Testing"}]
)
- assert response is not None
elif test_file.type == "agent":
response = humanloop_client.agents.call( # type: ignore [assignment]
path=test_file.path, messages=[{"role": "user", "content": "Testing"}]
)
- assert response is not None
+ # THEN the response should not be None
+ assert response is not None
- # Test with invalid path
+ # WHEN calling with an invalid path
+ # THEN it should raise HumanloopRuntimeError
with pytest.raises(HumanloopRuntimeError):
sub_client: Union[PromptsClient, AgentsClient]
match test_file.type:
@@ -171,36 +159,37 @@ def test_overload_with_local_files(
def test_overload_log_with_local_files(
get_humanloop_client: GetHumanloopClientFn,
- test_file_structure: List[SyncableFile],
+ syncable_files_fixture: List[SyncableFile],
sdk_test_dir: str,
):
"""Test that overload_with_local_files correctly handles local files for log operations."""
- # The test_file_structure fixture will automatically use use_local_files=True
+ # GIVEN a client with use_local_files=True and pulled files
humanloop_client = get_humanloop_client(use_local_files=True)
humanloop_client.pull()
- # Test using the pulled files
- test_file = test_file_structure[0] # Use the first test file
+ # GIVEN a test file from the structure
+ test_file = syncable_files_fixture[0]
extension = f".{test_file.type}"
local_path = Path("humanloop") / f"{test_file.path}{extension}"
- # Verify the file was pulled correctly
+ # THEN the file should exist locally
assert local_path.exists(), f"Expected pulled file at {local_path}"
assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
- # Test log with pulled file
+ # WHEN logging with the pulled file
if test_file.type == "prompt":
response = humanloop_client.prompts.log( # type: ignore [assignment]
path=test_file.path, messages=[{"role": "user", "content": "Testing"}], output="Test response"
)
- assert response is not None
elif test_file.type == "agent":
response = humanloop_client.agents.log( # type: ignore [assignment]
path=test_file.path, messages=[{"role": "user", "content": "Testing"}], output="Test response"
)
- assert response is not None
+ # THEN the response should not be None
+ assert response is not None
- # Test with invalid path
+ # WHEN logging with an invalid path
+ # THEN it should raise HumanloopRuntimeError
with pytest.raises(HumanloopRuntimeError):
if test_file.type == "prompt":
humanloop_client.prompts.log(
@@ -218,29 +207,24 @@ def test_overload_log_with_local_files(
def test_overload_version_environment_handling(
get_humanloop_client: GetHumanloopClientFn,
- test_file_structure: List[SyncableFile],
+ syncable_files_fixture: List[SyncableFile],
):
- """Test that overload_with_local_files correctly handles version_id and environment parameters.
-
- Flow:
- 1. Create files in remote (via test_file_structure fixture)
- 2. Pull files locally
- 3. Test that specifying path + version_id/environment raises HumanloopRuntimeError
- """
- # First pull the files locally
+ """Test that overload_with_local_files correctly handles version_id and environment parameters."""
+ # GIVEN a client with use_local_files=True and pulled files
humanloop_client = get_humanloop_client(use_local_files=True)
humanloop_client.pull()
- # Test using the pulled files
- test_file = test_file_structure[0] # Use the first test file
+ # GIVEN a test file from the structure
+ test_file = syncable_files_fixture[0]
extension = f".{test_file.type}"
local_path = Path("humanloop") / f"{test_file.path}{extension}"
- # Verify the file was pulled correctly
+ # THEN the file should exist locally
assert local_path.exists(), f"Expected pulled file at {local_path}"
assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
- # Test with version_id - should raise HumanloopRuntimeError
+ # WHEN calling with version_id
+ # THEN it should raise HumanloopRuntimeError
with pytest.raises(HumanloopRuntimeError, match="Cannot use local file.*version_id or environment was specified"):
if test_file.type == "prompt":
humanloop_client.prompts.call(
@@ -255,7 +239,8 @@ def test_overload_version_environment_handling(
messages=[{"role": "user", "content": "Testing"}],
)
- # Test with environment - should raise HumanloopRuntimeError
+ # WHEN calling with environment
+ # THEN it should raise HumanloopRuntimeError
with pytest.raises(HumanloopRuntimeError, match="Cannot use local file.*version_id or environment was specified"):
if test_file.type == "prompt":
humanloop_client.prompts.call(
@@ -270,7 +255,8 @@ def test_overload_version_environment_handling(
messages=[{"role": "user", "content": "Testing"}],
)
- # Test with both version_id and environment - should raise HumanloopRuntimeError
+ # WHEN calling with both version_id and environment
+ # THEN it should raise HumanloopRuntimeError
with pytest.raises(HumanloopRuntimeError, match="Cannot use local file.*version_id or environment was specified"):
if test_file.type == "prompt":
humanloop_client.prompts.call(
diff --git a/tests/custom/sync/test_client.py b/tests/custom/sync/test_client.py
index bb3d2cd0..37cec52b 100644
--- a/tests/custom/sync/test_client.py
+++ b/tests/custom/sync/test_client.py
@@ -24,6 +24,8 @@ def sync_client(mock_client: Mock, tmp_path: Path) -> SyncClient:
def test_init(sync_client: SyncClient, tmp_path: Path):
"""Test basic initialization of SyncClient."""
+ # GIVEN a SyncClient instance
+ # THEN it should be initialized with correct base directory, cache size and file types
assert sync_client.base_dir == tmp_path
assert sync_client._cache_size == 10
assert sync_client.SERIALIZABLE_FILE_TYPES == ["prompt", "agent"]
@@ -31,6 +33,7 @@ def test_init(sync_client: SyncClient, tmp_path: Path):
def test_normalize_path(sync_client: SyncClient):
"""Test path normalization functionality."""
+ # GIVEN various file paths with different formats
test_cases = [
("path/to/file.prompt", "path/to/file"),
("path\\to\\file.agent", "path/to/file"),
@@ -40,11 +43,17 @@ def test_normalize_path(sync_client: SyncClient):
]
for input_path, expected in test_cases:
- assert sync_client._normalize_path(input_path) == expected
+ # WHEN they are normalized
+ normalized = sync_client._normalize_path(input_path)
+ # THEN they should be converted to the expected format
+ assert normalized == expected
def test_is_file(sync_client: SyncClient):
"""Test file type detection."""
+ # GIVEN various file paths
+ # WHEN checking if they are valid file types
+ # THEN only .prompt and .agent files should return True
assert sync_client.is_file("test.prompt")
assert sync_client.is_file("test.agent")
assert not sync_client.is_file("test.txt")
@@ -53,54 +62,68 @@ def test_is_file(sync_client: SyncClient):
def test_save_and_read_file(sync_client: SyncClient):
"""Test saving and reading files."""
+ # GIVEN a file content and path
content = "test content"
path = "test/path"
file_type = "prompt"
- # Test saving
+ # WHEN saving the file
sync_client._save_serialized_file(content, path, "prompt")
saved_path = sync_client.base_dir / path
saved_path = saved_path.parent / f"{saved_path.stem}.{file_type}"
+
+ # THEN the file should exist on disk
assert saved_path.exists()
- # Test reading
+ # WHEN reading the file
read_content = sync_client.get_file_content(path, file_type)
+
+ # THEN the content should match
assert read_content == content
def test_error_handling(sync_client: SyncClient):
"""Test error handling in various scenarios."""
- # Test file not found
+ # GIVEN a nonexistent file
+ # WHEN trying to read it
+ # THEN a HumanloopRuntimeError should be raised
with pytest.raises(HumanloopRuntimeError, match="Local file not found"):
sync_client.get_file_content("nonexistent", "prompt")
- # Test invalid file type
+ # GIVEN an invalid file type
+ # WHEN trying to pull the file
+ # THEN a ValueError should be raised
with pytest.raises(ValueError, match="Unsupported file type"):
sync_client._pull_file("test.txt")
- # Test API error
+ # GIVEN an API error
+ # WHEN trying to pull a file
+ # THEN it should return False
with patch.object(sync_client.client.files, "retrieve_by_path", side_effect=Exception("API Error")):
assert not sync_client._pull_file("test.prompt")
def test_cache_functionality(sync_client: SyncClient):
"""Test LRU cache functionality."""
- # Save a test file
+ # GIVEN a test file
content = "test content"
path = "test/path"
file_type: Literal["prompt", "agent"] = "prompt"
sync_client._save_serialized_file(content, path, file_type)
- # First read should hit disk
+ # WHEN reading the file for the first time
sync_client.get_file_content(path, file_type)
+ # THEN it should hit disk (implicitly verified by no cache hit)
- # Modify file on disk
+ # WHEN modifying the file on disk
saved_path = sync_client.base_dir / f"{path}.{file_type}"
saved_path.write_text("modified content")
- # Second read should use cache
+ # THEN subsequent reads should use cache
assert sync_client.get_file_content(path, file_type) == content
- # Clear cache and verify new content is read
+ # WHEN clearing the cache
sync_client.clear_cache()
+
+ # THEN new content should be read from disk
assert sync_client.get_file_content(path, file_type) == "modified content"
From fe1d2464ce1fd56413eab201528c5e52250f4688 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Mon, 12 May 2025 18:10:24 +0100
Subject: [PATCH 27/39] Refactor loading of API key into its own function in
Sync CLI
---
src/humanloop/cli/__main__.py | 119 ++++++++++++++++++++++------------
1 file changed, 78 insertions(+), 41 deletions(-)
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index ebf4fd33..186de14e 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -1,6 +1,5 @@
import click
import logging
-from pathlib import Path
from typing import Optional, Callable
from functools import wraps
from dotenv import load_dotenv, find_dotenv
@@ -8,7 +7,6 @@
import sys
from humanloop import Humanloop
from humanloop.sync.sync_client import SyncClient
-from datetime import datetime
import time
# Set up logging
@@ -26,37 +24,69 @@
INFO_COLOR = "blue"
WARNING_COLOR = "yellow"
-def get_client(api_key: Optional[str] = None, env_file: Optional[str] = None, base_url: Optional[str] = None) -> Humanloop:
- """Get a Humanloop client instance.
-
- If no API key is provided, it will be loaded from the .env file, or the environment variable HUMANLOOP_API_KEY.
+
+def load_api_key(env_file: Optional[str] = None) -> str:
+ """Load API key from provided value, .env file, or environment variable.
+
+ Args:
+ api_key: Optional API key provided directly
+ env_file: Optional path to .env file
+
+ Returns:
+ str: The loaded API key
Raises:
- click.ClickException: If no API key is found.
+ click.ClickException: If no API key is found
"""
- if not api_key:
- if env_file:
- load_dotenv(env_file)
+ # Try loading from .env file
+ if env_file:
+ load_dotenv(env_file)
+ else:
+ # Try to find .env file in current directory or parent directories
+ env_path = find_dotenv()
+ if env_path:
+ load_dotenv(env_path)
+ elif os.path.exists(".env"):
+ load_dotenv(".env")
else:
- env_path = find_dotenv()
- if env_path:
- load_dotenv(env_path)
- else:
- if os.path.exists(".env"):
- load_dotenv(".env")
- else:
- load_dotenv()
-
- api_key = os.getenv("HUMANLOOP_API_KEY")
- if not api_key:
- raise click.ClickException(
- click.style("No API key found. Set HUMANLOOP_API_KEY in .env file or environment, or use --api-key", fg=ERROR_COLOR)
- )
+ load_dotenv()
+
+ # Get API key from environment
+ api_key = os.getenv("HUMANLOOP_API_KEY")
+ if not api_key:
+ raise click.ClickException(
+ click.style(
+ "No API key found. Set HUMANLOOP_API_KEY in .env file or environment, or use --api-key", fg=ERROR_COLOR
+ )
+ )
+
+ return api_key
+
+def get_client(
+ api_key: Optional[str] = None, env_file: Optional[str] = None, base_url: Optional[str] = None
+) -> Humanloop:
+ """Get a Humanloop client instance.
+
+ Args:
+ api_key: Optional API key provided directly
+ env_file: Optional path to .env file
+ base_url: Optional base URL for the API
+
+ Returns:
+ Humanloop: Configured client instance
+
+ Raises:
+ click.ClickException: If no API key is found
+ """
+ if not api_key:
+ api_key = load_api_key(env_file)
return Humanloop(api_key=api_key, base_url=base_url)
+
def common_options(f: Callable) -> Callable:
"""Decorator for common CLI options."""
+
@click.option(
"--api-key",
help="Humanloop API key. If not provided, uses HUMANLOOP_API_KEY from .env or environment.",
@@ -84,13 +114,16 @@ def common_options(f: Callable) -> Callable:
@wraps(f)
def wrapper(*args, **kwargs):
return f(*args, **kwargs)
+
return wrapper
+
def handle_sync_errors(f: Callable) -> Callable:
"""Decorator for handling sync operation errors.
-
+
If an error occurs in any operation that uses this decorator, it will be logged and the program will exit with a non-zero exit code.
"""
+
@wraps(f)
def wrapper(*args, **kwargs):
try:
@@ -98,19 +131,22 @@ def wrapper(*args, **kwargs):
except Exception as e:
click.echo(click.style(str(f"Error: {e}"), fg=ERROR_COLOR))
sys.exit(1)
+
return wrapper
+
@click.group(
help="Humanloop CLI for managing sync operations.",
context_settings={
"help_option_names": ["-h", "--help"],
"max_content_width": 100,
- }
+ },
)
-def cli(): # Does nothing because used as a group for other subcommands (pull, push, etc.)
+def cli(): # Does nothing because used as a group for other subcommands (pull, push, etc.)
"""Humanloop CLI for managing sync operations."""
pass
+
@cli.command()
@click.option(
"--path",
@@ -140,14 +176,14 @@ def cli(): # Does nothing because used as a group for other subcommands (pull, p
@handle_sync_errors
@common_options
def pull(
- path: Optional[str],
- environment: Optional[str],
- api_key: Optional[str],
- env_file: Optional[str],
- base_dir: str,
- base_url: Optional[str],
+ path: Optional[str],
+ environment: Optional[str],
+ api_key: Optional[str],
+ env_file: Optional[str],
+ base_dir: str,
+ base_url: Optional[str],
verbose: bool,
- quiet: bool
+ quiet: bool,
):
"""Pull Prompt and Agent files from Humanloop to your local filesystem.
@@ -178,29 +214,30 @@ def pull(
Currently only supports syncing Prompt and Agent files. Other file types will be skipped."""
client = get_client(api_key, env_file, base_url)
sync_client = SyncClient(client, base_dir=base_dir, log_level=logging.DEBUG if verbose else logging.WARNING)
-
+
click.echo(click.style("Pulling files from Humanloop...", fg=INFO_COLOR))
click.echo(click.style(f"Path: {path or '(root)'}", fg=INFO_COLOR))
click.echo(click.style(f"Environment: {environment or '(default)'}", fg=INFO_COLOR))
-
+
start_time = time.time()
successful_files, failed_files = sync_client.pull(path, environment)
duration_ms = int((time.time() - start_time) * 1000)
-
+
# Determine if the operation was successful based on failed_files
is_successful = not failed_files
duration_color = SUCCESS_COLOR if is_successful else ERROR_COLOR
click.echo(click.style(f"Pull completed in {duration_ms}ms", fg=duration_color))
-
+
if successful_files and not quiet:
click.echo(click.style(f"\nSuccessfully pulled {len(successful_files)} files:", fg=SUCCESS_COLOR))
- for file in successful_files:
+ for file in successful_files:
click.echo(click.style(f" ✓ {file}", fg=SUCCESS_COLOR))
-
+
if failed_files:
click.echo(click.style(f"\nFailed to pull {len(failed_files)} files:", fg=ERROR_COLOR))
for file in failed_files:
click.echo(click.style(f" ✗ {file}", fg=ERROR_COLOR))
+
if __name__ == "__main__":
- cli()
\ No newline at end of file
+ cli()
From 1f5055d9322250ead8a3ad9af31db1c072372f2b Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Mon, 12 May 2025 19:26:52 +0100
Subject: [PATCH 28/39] test: add tests for CLI
---
src/humanloop/sync/sync_client.py | 10 +-
tests/custom/conftest.py | 1 -
tests/custom/integration/conftest.py | 84 +++++++++-
tests/custom/integration/test_sync.py | 76 +--------
tests/custom/integration/test_sync_cli.py | 183 ++++++++++++++++++++++
tests/custom/types.py | 12 +-
6 files changed, 287 insertions(+), 79 deletions(-)
create mode 100644 tests/custom/integration/test_sync_cli.py
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index 03314406..ff7a8aeb 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -7,7 +7,6 @@
import time
from humanloop.error import HumanloopRuntimeError
import json
-import re
if TYPE_CHECKING:
from humanloop.base_client import BaseHumanloop
@@ -41,8 +40,13 @@ def format_api_error(error: Exception) -> str:
# Get the detail from the body
detail = body.get("detail", {})
- # Prefer description, fall back to msg
- return detail.get("description") or detail.get("msg") or error_msg
+ # Handle both string and dictionary types for detail
+ if isinstance(detail, str):
+ return detail
+ elif isinstance(detail, dict):
+ return detail.get("description") or detail.get("msg") or error_msg
+ else:
+ return error_msg
except Exception as e:
logger.debug(f"Failed to parse error message: {str(e)}")
return error_msg
diff --git a/tests/custom/conftest.py b/tests/custom/conftest.py
index 6d877721..7667dedf 100644
--- a/tests/custom/conftest.py
+++ b/tests/custom/conftest.py
@@ -88,7 +88,6 @@ def get_humanloop_client() -> GetHumanloopClientFn:
def _get_humanloop_client(use_local_files: bool = False) -> Humanloop:
return Humanloop(
api_key=os.getenv("HUMANLOOP_API_KEY"),
- base_url="http://localhost:80/v5",
use_local_files=use_local_files,
)
diff --git a/tests/custom/integration/conftest.py b/tests/custom/integration/conftest.py
index 1c443471..f918c48c 100644
--- a/tests/custom/integration/conftest.py
+++ b/tests/custom/integration/conftest.py
@@ -2,13 +2,15 @@
from dataclasses import dataclass
import os
import time
-from typing import Any, ContextManager, Generator
+from typing import Any, ContextManager, Generator, List, Union
import io
from typing import TextIO
import uuid
import pytest
import dotenv
-from tests.custom.types import GetHumanloopClientFn
+from humanloop import AgentResponse, PromptResponse
+from tests.custom.types import GetHumanloopClientFn, SyncableFile
+from click.testing import CliRunner
@dataclass
@@ -177,3 +179,81 @@ def id_for_staging_environment(get_humanloop_client: GetHumanloopClientFn, eval_
if environment.name == "staging":
return environment.id
pytest.fail("Staging environment not found")
+
+
+@pytest.fixture
+def syncable_files_fixture(
+ get_humanloop_client: GetHumanloopClientFn,
+ sdk_test_dir: str,
+) -> Generator[list[SyncableFile], None, None]:
+ """Creates a predefined structure of files in Humanloop for testing sync."""
+ files: List[SyncableFile] = [
+ SyncableFile(
+ path="prompts/gpt-4",
+ type="prompt",
+ model="gpt-4",
+ ),
+ SyncableFile(
+ path="prompts/gpt-4o",
+ type="prompt",
+ model="gpt-4o",
+ ),
+ SyncableFile(
+ path="prompts/nested/complex/gpt-4o",
+ type="prompt",
+ model="gpt-4o",
+ ),
+ SyncableFile(
+ path="agents/gpt-4",
+ type="agent",
+ model="gpt-4",
+ ),
+ SyncableFile(
+ path="agents/gpt-4o",
+ type="agent",
+ model="gpt-4o",
+ ),
+ ]
+
+ humanloop_client = get_humanloop_client()
+ created_files = []
+ for file in files:
+ full_path = f"{sdk_test_dir}/{file.path}"
+ response: Union[AgentResponse, PromptResponse]
+ if file.type == "prompt":
+ response = humanloop_client.prompts.upsert(
+ path=full_path,
+ model=file.model,
+ )
+ elif file.type == "agent":
+ response = humanloop_client.agents.upsert(
+ path=full_path,
+ model=file.model,
+ )
+ created_files.append(
+ SyncableFile(
+ path=full_path, type=file.type, model=file.model, id=response.id, version_id=response.version_id
+ )
+ )
+
+ yield created_files
+
+
+@pytest.fixture
+def cli_runner() -> CliRunner:
+ """GIVEN a CLI runner
+ THEN it should be configured to catch exceptions
+ """
+ return CliRunner(mix_stderr=False)
+
+
+@pytest.fixture
+def no_humanloop_api_key_in_env(monkeypatch):
+ """Fixture that removes HUMANLOOP_API_KEY from environment variables.
+
+ Use this fixture in tests that verify behavior when no API key is available
+ in the environment (but could still be loaded from .env files).
+ """
+ # Remove API key from environment
+ monkeypatch.delenv("HUMANLOOP_API_KEY", raising=False)
+ yield
diff --git a/tests/custom/integration/test_sync.py b/tests/custom/integration/test_sync.py
index 0423d384..6e7b002b 100644
--- a/tests/custom/integration/test_sync.py
+++ b/tests/custom/integration/test_sync.py
@@ -1,79 +1,11 @@
-from typing import Generator, List, NamedTuple, Union
+from typing import List, Union
from pathlib import Path
import pytest
-from humanloop import FileType, AgentResponse, PromptResponse
+from humanloop import AgentResponse, PromptResponse
from humanloop.prompts.client import PromptsClient
from humanloop.agents.client import AgentsClient
from humanloop.error import HumanloopRuntimeError
-from tests.custom.types import GetHumanloopClientFn
-
-
-class SyncableFile(NamedTuple):
- path: str
- type: FileType
- model: str
- id: str = ""
- version_id: str = ""
-
-
-@pytest.fixture
-def syncable_files_fixture(
- get_humanloop_client: GetHumanloopClientFn,
- sdk_test_dir: str,
-) -> Generator[list[SyncableFile], None, None]:
- """Creates a predefined structure of files in Humanloop for testing sync."""
- files: List[SyncableFile] = [
- SyncableFile(
- path="prompts/gpt-4",
- type="prompt",
- model="gpt-4",
- ),
- SyncableFile(
- path="prompts/gpt-4o",
- type="prompt",
- model="gpt-4o",
- ),
- SyncableFile(
- path="prompts/nested/complex/gpt-4o",
- type="prompt",
- model="gpt-4o",
- ),
- SyncableFile(
- path="agents/gpt-4",
- type="agent",
- model="gpt-4",
- ),
- SyncableFile(
- path="agents/gpt-4o",
- type="agent",
- model="gpt-4o",
- ),
- ]
-
- humanloop_client = get_humanloop_client()
- created_files = []
- for file in files:
- full_path = f"{sdk_test_dir}/{file.path}"
- response: Union[AgentResponse, PromptResponse]
- if file.type == "prompt":
- response = humanloop_client.prompts.upsert(
- path=full_path,
- model=file.model,
- )
- elif file.type == "agent":
- response = humanloop_client.agents.upsert(
- path=full_path,
- model=file.model,
- )
- created_files.append(
- SyncableFile(
- path=full_path, type=file.type, model=file.model, id=response.id, version_id=response.version_id
- )
- )
-
- humanloop_client.pull()
-
- yield created_files
+from tests.custom.types import GetHumanloopClientFn, SyncableFile
@pytest.fixture
@@ -96,7 +28,7 @@ def test_pull_basic(
humanloop_client = get_humanloop_client()
# WHEN running the sync
- successful_files = humanloop_client.pull()
+ humanloop_client.pull()
# THEN our local filesystem should mirror the remote filesystem in the HL Workspace
for file in syncable_files_fixture:
diff --git a/tests/custom/integration/test_sync_cli.py b/tests/custom/integration/test_sync_cli.py
new file mode 100644
index 00000000..41eaeede
--- /dev/null
+++ b/tests/custom/integration/test_sync_cli.py
@@ -0,0 +1,183 @@
+from pathlib import Path
+from unittest import mock
+import pytest
+from click.testing import CliRunner
+from humanloop.cli.__main__ import cli
+from tests.custom.types import SyncableFile
+
+
+@pytest.fixture
+def no_env_file_loading():
+ """Fixture that prevents loading API keys from any .env files.
+
+ Use this fixture in tests that verify behavior when no .env files should
+ be processed, regardless of whether they exist or not.
+ """
+ # Prevent any .env file from being loaded
+ with mock.patch("humanloop.cli.__main__.load_dotenv", lambda *args, **kwargs: None):
+ yield
+
+
+def test_pull_without_api_key(cli_runner: CliRunner, no_humanloop_api_key_in_env, no_env_file_loading):
+ """GIVEN no API key in environment
+ WHEN running pull command
+ THEN it should fail with appropriate error message
+ """
+ # WHEN running pull command
+ result = cli_runner.invoke(cli, ["pull", "--base-dir", "humanloop"])
+
+ # THEN it should fail with appropriate error message
+ assert result.exit_code == 1 # Our custom error code for API key issues
+ assert "No API key found" in result.output
+ assert "Set HUMANLOOP_API_KEY in .env file or environment" in result.output
+
+
+def test_pull_basic(
+ cli_runner: CliRunner,
+ syncable_files_fixture: list[SyncableFile],
+ tmp_path: Path, # this path is used as a temporary store for files locally
+):
+ # GIVEN a base directory for pulled files
+ base_dir = str(tmp_path / "humanloop")
+
+ # WHEN running pull command
+ result = cli_runner.invoke(cli, ["pull", "--base-dir", base_dir, "--verbose"])
+
+ # THEN it should succeed
+ assert result.exit_code == 0
+ assert "Pulling files from Humanloop..." in result.output
+ assert "Pull completed" in result.output
+
+ # THEN the files should exist locally
+ for file in syncable_files_fixture:
+ extension = f".{file.type}"
+ local_path = Path(base_dir) / f"{file.path}{extension}"
+ assert local_path.exists(), f"Expected synced file at {local_path}"
+ assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
+ assert local_path.read_text(), f"File at {local_path} should not be empty"
+
+
+def test_pull_with_specific_path(
+ cli_runner: CliRunner,
+ syncable_files_fixture: list[SyncableFile],
+ tmp_path: Path,
+):
+ """GIVEN a specific path to pull
+ WHEN running pull command with path
+ THEN it should pull only files from that path
+ """
+ # GIVEN a base directory and specific path
+ base_dir = str(tmp_path / "humanloop")
+ test_path = syncable_files_fixture[
+ 0
+ ].path.split(
+ "/"
+ )[
+ 0
+ ] # Retrieve the prefix of the first file's path which corresponds to the sdk_test_dir used within syncable_files_fixture
+
+ # WHEN running pull command with path
+ result = cli_runner.invoke(
+ cli, ["pull", "--base-dir", base_dir, "--path", test_path, "--verbose", "--base-url", "http://localhost:80/v5"]
+ )
+
+ # THEN it should succeed and show the path
+ assert result.exit_code == 0
+ assert f"Path: {test_path}" in result.output
+
+ # THEN only files from that path should exist locally
+ for file in syncable_files_fixture:
+ extension = f".{file.type}"
+ local_path = Path(base_dir) / f"{file.path}{extension}"
+ if file.path.startswith(test_path):
+ assert local_path.exists(), f"Expected synced file at {local_path}"
+ else:
+ assert not local_path.exists(), f"Unexpected file at {local_path}"
+
+
+def test_pull_with_environment(
+ cli_runner: CliRunner,
+ syncable_files_fixture: list[SyncableFile],
+ tmp_path: Path,
+):
+ # GIVEN a base directory and environment
+ base_dir = str(tmp_path / "humanloop")
+ environment = "staging"
+
+ # WHEN running pull command with environment
+ result = cli_runner.invoke(
+ cli,
+ [
+ "pull",
+ "--base-dir",
+ base_dir,
+ "--environment",
+ environment,
+ "--verbose",
+ "--base-url",
+ "http://localhost:80/v5",
+ ],
+ )
+
+ # THEN it should succeed and show the environment
+ assert result.exit_code == 0
+ assert f"Environment: {environment}" in result.output
+
+
+def test_pull_with_quiet_mode(
+ cli_runner: CliRunner,
+ syncable_files_fixture: list[SyncableFile],
+ tmp_path: Path,
+):
+ # GIVEN a base directory and quiet mode
+ base_dir = str(tmp_path / "humanloop")
+
+ # WHEN running pull command with quiet mode
+ result = cli_runner.invoke(cli, ["pull", "--base-dir", base_dir, "--quiet"])
+
+ # THEN it should succeed but not show file list
+ assert result.exit_code == 0
+ assert "Successfully pulled" not in result.output
+
+ # THEN files should still be pulled
+ for file in syncable_files_fixture:
+ extension = f".{file.type}"
+ local_path = Path(base_dir) / f"{file.path}{extension}"
+ assert local_path.exists(), f"Expected synced file at {local_path}"
+
+
+def test_pull_with_invalid_path(
+ cli_runner: CliRunner,
+):
+ # GIVEN an invalid base directory
+ path = "nonexistent/path"
+
+ # WHEN running pull command
+ result = cli_runner.invoke(cli, ["pull", "--path", path])
+
+ # THEN it should fail
+ assert result.exit_code == 1
+ assert "Error" in result.output
+
+
+def test_pull_with_invalid_environment(cli_runner: CliRunner, tmp_path: Path):
+ # GIVEN an invalid environment
+ environment = "nonexistent"
+ base_dir = str(tmp_path / "humanloop")
+
+ # WHEN running pull command
+ result = cli_runner.invoke(
+ cli,
+ [
+ "pull",
+ "--base-dir",
+ base_dir,
+ "--environment",
+ environment,
+ "--verbose",
+ ],
+ )
+
+ # THEN it should fail
+ assert result.exit_code == 1
+ assert "Error" in result.output
diff --git a/tests/custom/types.py b/tests/custom/types.py
index 98de8c38..7a198456 100644
--- a/tests/custom/types.py
+++ b/tests/custom/types.py
@@ -1,5 +1,15 @@
-from typing import Protocol
+from typing import Protocol, NamedTuple
from humanloop.client import Humanloop
+from humanloop import FileType
+
class GetHumanloopClientFn(Protocol):
def __call__(self, use_local_files: bool = False) -> Humanloop: ...
+
+
+class SyncableFile(NamedTuple):
+ path: str
+ type: FileType
+ model: str
+ id: str = ""
+ version_id: str = ""
From a25933591323467e15821b822d836fd397fa410a Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 13 May 2025 10:31:28 +0100
Subject: [PATCH 29/39] Clean up in response to PR feedback
---
src/humanloop/cli/__main__.py | 31 ++++++------
src/humanloop/overload.py | 15 ++++--
src/humanloop/sync/sync_client.py | 80 ++++++++++++++-----------------
3 files changed, 60 insertions(+), 66 deletions(-)
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index 186de14e..56fa288f 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -2,7 +2,7 @@
import logging
from typing import Optional, Callable
from functools import wraps
-from dotenv import load_dotenv, find_dotenv
+from dotenv import load_dotenv
import os
import sys
from humanloop import Humanloop
@@ -26,10 +26,9 @@
def load_api_key(env_file: Optional[str] = None) -> str:
- """Load API key from provided value, .env file, or environment variable.
+ """Load API key from .env file or environment variable.
Args:
- api_key: Optional API key provided directly
env_file: Optional path to .env file
Returns:
@@ -38,18 +37,17 @@ def load_api_key(env_file: Optional[str] = None) -> str:
Raises:
click.ClickException: If no API key is found
"""
- # Try loading from .env file
+ # Try specific .env file if provided, otherwise default to .env in current directory
if env_file:
- load_dotenv(env_file)
+ if not load_dotenv(env_file): # load_dotenv returns False if file not found/invalid
+ raise click.ClickException(
+ click.style(
+ f"Failed to load environment file: {env_file} (file not found or invalid format)",
+ fg=ERROR_COLOR,
+ )
+ )
else:
- # Try to find .env file in current directory or parent directories
- env_path = find_dotenv()
- if env_path:
- load_dotenv(env_path)
- elif os.path.exists(".env"):
- load_dotenv(".env")
- else:
- load_dotenv()
+ load_dotenv() # Attempt to load from default .env in current directory
# Get API key from environment
api_key = os.getenv("HUMANLOOP_API_KEY")
@@ -151,8 +149,9 @@ def cli(): # Does nothing because used as a group for other subcommands (pull,
@click.option(
"--path",
"-p",
- help="Path in the Humanloop workspace to pull from (file or directory). You can pull an entire directory (e.g. 'my/directory/') "
- "or a specific file (e.g. 'my/directory/my_prompt.prompt'). When pulling a directory, all files within that directory and its subdirectories will be included.",
+ help="Path in the Humanloop workspace to pull from (file or directory). You can pull an entire directory (e.g. 'my/directory') "
+ "or a specific file (e.g. 'my/directory/my_prompt.prompt'). When pulling a directory, all files within that directory and its subdirectories will be included. "
+ "If not specified, pulls from the root of the workspace.",
default=None,
)
@click.option(
@@ -190,7 +189,7 @@ def pull(
\b
This command will:
1. Fetch Prompt and Agent files from your Humanloop workspace
- 2. Save them to your local filesystem (default: humanloop/)
+ 2. Save them to your local filesystem (default directory: humanloop/)
3. Maintain the same directory structure as in Humanloop
4. Add appropriate file extensions (.prompt or .agent)
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index f66f6cdf..d04ca918 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -22,15 +22,20 @@
from humanloop.types.create_prompt_log_response import CreatePromptLogResponse
from humanloop.types.create_tool_log_response import CreateToolLogResponse
from humanloop.types.prompt_call_response import PromptCallResponse
+from humanloop.types.agent_call_response import AgentCallResponse
logger = logging.getLogger("humanloop.sdk")
-ResponseType = Union[
+LogResponseType = Union[
CreatePromptLogResponse,
CreateToolLogResponse,
CreateFlowLogResponse,
CreateEvaluatorLogResponse,
+]
+
+CallResponseType = Union[
PromptCallResponse,
+ AgentCallResponse,
]
@@ -128,7 +133,7 @@ def _handle_evaluation_context(kwargs: Dict[str, Any]) -> tuple[Dict[str, Any],
return kwargs, None
-def _overload_log(self: Any, sync_client: Optional[SyncClient], use_local_files: bool, **kwargs) -> ResponseType:
+def _overload_log(self: Any, sync_client: Optional[SyncClient], use_local_files: bool, **kwargs) -> LogResponseType:
try:
# Special handling for flows - prevent direct log usage
if type(self) is FlowsClient and get_trace_id() is not None:
@@ -162,7 +167,7 @@ def _overload_log(self: Any, sync_client: Optional[SyncClient], use_local_files:
raise HumanloopRuntimeError from e
-def _overload_call(self: Any, sync_client: Optional[SyncClient], use_local_files: bool, **kwargs) -> PromptCallResponse:
+def _overload_call(self: Any, sync_client: Optional[SyncClient], use_local_files: bool, **kwargs) -> CallResponseType:
try:
kwargs = _handle_tracing_context(kwargs, self)
kwargs = _handle_local_files(kwargs, self, sync_client, use_local_files)
@@ -186,7 +191,7 @@ def overload_client(
client._log = client.log # type: ignore [attr-defined]
# Create a closure to capture sync_client and use_local_files
- def log_wrapper(self: Any, **kwargs) -> ResponseType:
+ def log_wrapper(self: Any, **kwargs) -> LogResponseType:
return _overload_log(self, sync_client, use_local_files, **kwargs)
client.log = types.MethodType(log_wrapper, client)
@@ -200,7 +205,7 @@ def log_wrapper(self: Any, **kwargs) -> ResponseType:
client._call = client.call # type: ignore [attr-defined]
# Create a closure to capture sync_client and use_local_files
- def call_wrapper(self: Any, **kwargs) -> PromptCallResponse:
+ def call_wrapper(self: Any, **kwargs) -> CallResponseType:
return _overload_call(self, sync_client, use_local_files, **kwargs)
client.call = types.MethodType(call_wrapper, client)
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index ff7a8aeb..be7cb59b 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -33,8 +33,10 @@ def format_api_error(error: Exception) -> str:
try:
# Extract the body part and parse as JSON
body_str = error_msg.split("body: ")[1]
- # Convert Python dict string to valid JSON by replacing single quotes with double quotes
- body_str = body_str.replace("'", '"')
+ # Convert Python dict string to valid JSON by:
+ # 1. Escaping double quotes
+ # 2. Replacing single quotes with double quotes
+ body_str = body_str.replace('"', '\\"').replace("'", '"')
body = json.loads(body_str)
# Get the detail from the body
@@ -52,7 +54,7 @@ def format_api_error(error: Exception) -> str:
return error_msg
-_SERIALIZABLE_FILE_TYPES_TYPE = typing.Literal["prompt", "agent"]
+SerializableFileType = typing.Literal["prompt", "agent"]
class SyncClient:
@@ -68,7 +70,7 @@ class SyncClient:
"""
# File types that can be serialized to/from the filesystem
- SERIALIZABLE_FILE_TYPES = ["prompt", "agent"]
+ SERIALIZABLE_FILE_TYPES = frozenset(typing.get_args(SerializableFileType))
def __init__(
self,
@@ -92,19 +94,18 @@ def __init__(
logger.setLevel(log_level)
# Create a new cached version of get_file_content with the specified cache size
- # @TODO: @ale, maybe move the cache to the client?
self.get_file_content = lru_cache(maxsize=cache_size)( # type: ignore [assignment]
self._get_file_content_implementation,
)
- def _get_file_content_implementation(self, path: str, file_type: FileType) -> str:
+ def _get_file_content_implementation(self, path: str, file_type: SerializableFileType) -> str:
"""Implementation of get_file_content without the cache decorator.
This is the actual implementation that gets wrapped by lru_cache.
Args:
path: The normalized path to the file (without extension)
- file_type: The type of file (Prompt or Agent)
+ file_type: The type of file to get the content of (SerializableFileType)
Returns:
The raw file content
@@ -132,7 +133,7 @@ def _get_file_content_implementation(self, path: str, file_type: FileType) -> st
except Exception as e:
raise HumanloopRuntimeError(f"Error reading local file {local_path}: {str(e)}")
- def get_file_content(self, path: str, file_type: FileType) -> str:
+ def get_file_content(self, path: str, file_type: SerializableFileType) -> str:
"""Get the raw file content of a file from cache or filesystem.
This method uses an LRU cache to store file contents. When the cache is full,
@@ -152,7 +153,6 @@ def get_file_content(self, path: str, file_type: FileType) -> str:
def clear_cache(self) -> None:
"""Clear the LRU cache."""
- # @TODO: @ale, why not put the cache on the client? This is a bit of a hack.
self.get_file_content.cache_clear() # type: ignore [attr-defined]
def _normalize_path(self, path: str) -> str:
@@ -170,14 +170,14 @@ def _normalize_path(self, path: str) -> str:
return "/".join(part for part in normalized.replace("\\", "/").split("/") if part)
def is_file(self, path: str) -> bool:
- """Check if the path is a file by checking for .prompt or .agent extension."""
- return path.endswith(".prompt") or path.endswith(".agent")
+ """Check if the path is a file by checking for .{file_type} extension for serializable file types."""
+ return path.endswith(tuple(f".{file_type}" for file_type in self.SERIALIZABLE_FILE_TYPES))
def _save_serialized_file(
self,
serialized_content: str,
file_path: str,
- file_type: typing.Literal["prompt", "agent"],
+ file_type: SerializableFileType,
) -> None:
"""Save serialized file to local filesystem."""
try:
@@ -192,25 +192,15 @@ def _save_serialized_file(
# Write raw file content to file
with open(new_path, "w") as f:
f.write(serialized_content)
-
- # Clear the cache when a file is saved
- self.clear_cache()
except Exception as e:
- logger.error(f"Failed to sync {file_type} {file_path}: {str(e)}")
+ logger.error(f"Failed to write {file_type} {file_path} to disk: {str(e)}")
raise
- def _pull_file(
- self,
- path: str | None = None,
- environment: str | None = None,
- ) -> bool:
+ def _pull_file(self, path: str | None = None, environment: str | None = None) -> bool:
"""Pull a specific file from Humanloop to local filesystem.
Returns:
True if the file was successfully pulled, False otherwise
-
- Raises:
- HumanloopRuntimeError: If there's an error communicating with the API
"""
try:
file = self.client.files.retrieve_by_path(
@@ -218,27 +208,23 @@ def _pull_file(
environment=environment,
include_raw_file_content=True,
)
- except Exception as e:
- logger.error(f"Failed to pull file {path}: {format_api_error(e)}")
- return False
+
+ if file.type not in self.SERIALIZABLE_FILE_TYPES:
+ logger.error(f"Unsupported file type: {file.type}")
+ return False
- if file.type not in self.SERIALIZABLE_FILE_TYPES:
- raise ValueError(f"Unsupported file type: {file.type}")
+ if not file.raw_file_content: # type: ignore [union-attr]
+ logger.error(f"No content found for {file.type} {path}")
+ return False
- file_type: _SERIALIZABLE_FILE_TYPES_TYPE = typing.cast(
- _SERIALIZABLE_FILE_TYPES_TYPE,
- file.type,
- )
-
- try:
self._save_serialized_file(
- serialized_content=file.raw_file_content, # type: ignore [union-attr, arg-type]
+ serialized_content=file.raw_file_content, # type: ignore [union-attr]
file_path=file.path,
- file_type=file_type,
+ file_type=typing.cast(SerializableFileType, file.type),
)
return True
except Exception as e:
- logger.error(f"Failed to save file {path}: {str(e)}")
+ logger.error(f"Failed to pull file {path}: {str(e)}")
return False
def _pull_directory(
@@ -251,7 +237,8 @@ def _pull_directory(
Returns:
Tuple of two lists:
- First list contains paths of successfully synced files
- - Second list contains paths of files that failed to sync
+ - Second list contains paths of files that failed to sync.
+ Failures can occur due to missing content in the response or errors during local file writing.
Raises:
HumanloopRuntimeError: If there's an error communicating with the API
@@ -266,7 +253,7 @@ def _pull_directory(
try:
logger.debug(f"`{path}`: Requesting page {page} of files")
response = self.client.files.list_files(
- type=["prompt", "agent"],
+ type=list(self.SERIALIZABLE_FILE_TYPES),
page=page,
include_raw_file_content=True,
environment=environment,
@@ -281,20 +268,20 @@ def _pull_directory(
# Process each file
for file in response.records:
- # Skip if not a Prompt or Agent
+ # Skip if not a serializable file type
if file.type not in self.SERIALIZABLE_FILE_TYPES:
logger.warning(f"Skipping unsupported file type: {file.type}")
continue
- file_type: _SERIALIZABLE_FILE_TYPES_TYPE = typing.cast(
- _SERIALIZABLE_FILE_TYPES_TYPE,
+ file_type: SerializableFileType = typing.cast(
+ SerializableFileType,
file.type,
)
# Skip if no raw file content
- # @TODO: @ale, inconsistent, other places we are throwing if file is not serialisable
if not getattr(file, "raw_file_content", None) or not file.raw_file_content: # type: ignore [union-attr]
- logger.warning(f"No content found for {file.type} {getattr(file, 'id', '')}")
+ logger.warning(f"No content found for {file.type} {file.path}")
+ failed_files.append(file.path)
continue
try:
@@ -368,6 +355,9 @@ def pull(self, path: str | None = None, environment: str | None = None) -> Tuple
logger.debug(f"Pulling directory: {normalized_path}")
successful_files, failed_files = self._pull_directory(normalized_path, environment)
+ # Clear the cache at the end of each pull operation
+ self.clear_cache()
+
duration_ms = int((time.time() - start_time) * 1000)
logger.info(f"Pull completed in {duration_ms}ms: {len(successful_files)} files succeeded")
From 893b23c1079b9903626020952452a149e79f18c6 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 13 May 2025 11:15:08 +0100
Subject: [PATCH 30/39] Raise error on providing absolute path to local files
---
.gitignore | 2 +-
src/humanloop/cli/__main__.py | 2 +-
src/humanloop/overload.py | 7 +++++--
src/humanloop/sync/sync_client.py | 13 ++++++++++---
4 files changed, 17 insertions(+), 7 deletions(-)
diff --git a/.gitignore b/.gitignore
index cf60f9b1..f5cda9d9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,7 +5,7 @@ poetry.toml
.ruff_cache/
.vscode
.env
-.env.test
tests/assets/*.jsonl
tests/assets/*.parquet
+# Ignore humanloop directory which could mistakenly be committed when testing sync functionality as it's used as the default sync directory
humanloop
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index 56fa288f..0895a85c 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -64,7 +64,7 @@ def load_api_key(env_file: Optional[str] = None) -> str:
def get_client(
api_key: Optional[str] = None, env_file: Optional[str] = None, base_url: Optional[str] = None
) -> Humanloop:
- """Get a Humanloop client instance.
+ """Instantiate a Humanloop client for the CLI.
Args:
api_key: Optional API key provided directly
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index d04ca918..ab0d6fcb 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -108,6 +108,9 @@ def _handle_local_files(
)
file_type = _get_file_type_from_client(client)
+ if file_type not in SyncClient.SERIALIZABLE_FILE_TYPES:
+ raise HumanloopRuntimeError(f"Local files are not supported for `{file_type}` files.")
+
# If file_type is already specified in kwargs, it means user provided a PromptKernelRequestParams object
if file_type in kwargs and not isinstance(kwargs[file_type], str):
logger.warning(
@@ -117,7 +120,7 @@ def _handle_local_files(
return kwargs
try:
- file_content = sync_client.get_file_content(normalized_path, file_type)
+ file_content = sync_client.get_file_content(normalized_path, file_type) # type: ignore [arg-type] file_type was checked above
kwargs[file_type] = file_content
except HumanloopRuntimeError as e:
raise HumanloopRuntimeError(f"Failed to use local file for `{normalized_path}`: {str(e)}")
@@ -186,7 +189,7 @@ def overload_client(
use_local_files: bool = False,
) -> Any:
"""Overloads client methods to add tracing, local file handling, and evaluation context."""
- # Store original log method as _log for all clients. Used in flow decorator
+ # Store original log method as _log for all clients. Used in flow decorator
if hasattr(client, "log") and not hasattr(client, "_log"):
client._log = client.log # type: ignore [attr-defined]
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index be7cb59b..f6f421c7 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -3,7 +3,6 @@
from typing import List, Tuple, TYPE_CHECKING
from functools import lru_cache
import typing
-from humanloop.types import FileType
import time
from humanloop.error import HumanloopRuntimeError
import json
@@ -164,6 +163,14 @@ def _normalize_path(self, path: str) -> str:
# Convert to Path object to handle platform-specific separators
path_obj = Path(path)
+        # Reject absolute paths (e.g. those beginning with a forward slash on unix-like systems):
+        # paths must stay relative to the base directory so they map cleanly between the local and remote filesystems.
+ if path_obj.is_absolute():
+ raise HumanloopRuntimeError(
+ f"Absolute paths are not supported: `{path}`. "
+ f"Paths should be relative to the base directory (`{self.base_dir}`)."
+ )
+
# Remove extension, convert to string with forward slashes, and remove leading/trailing slashes
normalized = str(path_obj.with_suffix(""))
# Replace all backslashes and normalize multiple forward slashes
@@ -208,7 +215,7 @@ def _pull_file(self, path: str | None = None, environment: str | None = None) ->
environment=environment,
include_raw_file_content=True,
)
-
+
if file.type not in self.SERIALIZABLE_FILE_TYPES:
logger.error(f"Unsupported file type: {file.type}")
return False
@@ -237,7 +244,7 @@ def _pull_directory(
Returns:
Tuple of two lists:
- First list contains paths of successfully synced files
- - Second list contains paths of files that failed to sync.
+ - Second list contains paths of files that failed to sync.
Failures can occur due to missing content in the response or errors during local file writing.
Raises:
From 2072cf381fe0597c4ed0a11aa8bf2e8dd87735f9 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 13 May 2025 11:26:17 +0100
Subject: [PATCH 31/39] Rename files_directory to local_files_directory in HL
Client
---
src/humanloop/client.py | 18 +++++++++++++++---
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 83810d8b..6feb4abb 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -1,6 +1,7 @@
import os
import typing
from typing import Any, List, Optional, Sequence, Tuple
+import logging
import httpx
from opentelemetry.sdk.resources import Resource
@@ -31,6 +32,8 @@
from humanloop.prompts.client import PromptsClient
from humanloop.sync.sync_client import SyncClient, DEFAULT_CACHE_SIZE
+logger = logging.getLogger("humanloop.sdk")
+
class ExtendedEvalsClient(EvaluationsClient):
"""
@@ -105,7 +108,7 @@ def __init__(
opentelemetry_tracer_provider: Optional[TracerProvider] = None,
opentelemetry_tracer: Optional[Tracer] = None,
use_local_files: bool = False,
- files_directory: str = "humanloop",
+ local_files_directory: str = "humanloop",
cache_size: int = DEFAULT_CACHE_SIZE,
):
"""
@@ -128,7 +131,7 @@ def __init__(
opentelemetry_tracer_provider: Optional tracer provider for telemetry
opentelemetry_tracer: Optional tracer for telemetry
use_local_files: Whether to use local files for prompts and agents
- files_directory: Directory for local files (default: "humanloop")
+ local_files_directory: Directory for local files (default: "humanloop")
cache_size: Maximum number of files to cache when use_local_files is True (default: DEFAULT_CACHE_SIZE).
This parameter has no effect if use_local_files is False.
"""
@@ -142,7 +145,16 @@ def __init__(
)
self.use_local_files = use_local_files
- self._sync_client = SyncClient(client=self, base_dir=files_directory, cache_size=cache_size)
+
+ # Warn user if cache_size is non-default but use_local_files is False — has no effect and will therefore be ignored
+ if not self.use_local_files and cache_size != DEFAULT_CACHE_SIZE:
+ logger.warning(
+ f"The specified cache_size={cache_size} will have no effect because use_local_files=False. "
+ f"File caching is only active when local files are enabled."
+ )
+
+    # Instantiate the sync client backed by the configured local files directory
+ self._sync_client = SyncClient(client=self, base_dir=local_files_directory, cache_size=cache_size)
eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
eval_client.client = self
self.evaluations = eval_client
From 73966f5a8f7cfff504d47664d3098b4e96e7690c Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 13 May 2025 11:33:05 +0100
Subject: [PATCH 32/39] Revert modification to auto-generated code
---
src/humanloop/files/raw_client.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/humanloop/files/raw_client.py b/src/humanloop/files/raw_client.py
index 9b9bf17f..02f371ac 100644
--- a/src/humanloop/files/raw_client.py
+++ b/src/humanloop/files/raw_client.py
@@ -132,7 +132,7 @@ def list_files(
def retrieve_by_path(
self,
*,
- path: str | None = None,
+ path: str,
environment: typing.Optional[str] = None,
include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
From f6a9e6326250cf43ec153c6d6827e21b147b2f95 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 13 May 2025 12:00:48 +0100
Subject: [PATCH 33/39] Rename base_dir to local_files_directory in CLI for
consistency + docs cleanup
---
src/humanloop/cli/__main__.py | 22 ++++++++------
src/humanloop/client.py | 48 ++++++++++++++++++++-----------
src/humanloop/sync/sync_client.py | 6 ++--
3 files changed, 49 insertions(+), 27 deletions(-)
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
index 0895a85c..ad582bbc 100644
--- a/src/humanloop/cli/__main__.py
+++ b/src/humanloop/cli/__main__.py
@@ -99,8 +99,9 @@ def common_options(f: Callable) -> Callable:
show_default=False,
)
@click.option(
- "--base-dir",
- help="Base directory for pulled files (default: humanloop/)",
+ "--local-files-directory",
+ "--local-dir",
+ help="Directory (relative to the current working directory) where Humanloop files are stored locally (default: humanloop/).",
default="humanloop",
type=click.Path(),
)
@@ -151,7 +152,7 @@ def cli(): # Does nothing because used as a group for other subcommands (pull,
"-p",
help="Path in the Humanloop workspace to pull from (file or directory). You can pull an entire directory (e.g. 'my/directory') "
"or a specific file (e.g. 'my/directory/my_prompt.prompt'). When pulling a directory, all files within that directory and its subdirectories will be included. "
- "If not specified, pulls from the root of the workspace.",
+ "If not specified, pulls from the root of the remote workspace.",
default=None,
)
@click.option(
@@ -179,7 +180,7 @@ def pull(
environment: Optional[str],
api_key: Optional[str],
env_file: Optional[str],
- base_dir: str,
+ local_files_directory: str,
base_url: Optional[str],
verbose: bool,
quiet: bool,
@@ -189,13 +190,13 @@ def pull(
\b
This command will:
1. Fetch Prompt and Agent files from your Humanloop workspace
- 2. Save them to your local filesystem (default directory: humanloop/)
+ 2. Save them to your local filesystem (directory specified by --local-files-directory, default: humanloop/)
3. Maintain the same directory structure as in Humanloop
4. Add appropriate file extensions (.prompt or .agent)
\b
- The files will be saved with the following structure:
- humanloop/
+ For example, with the default --local-files-directory=humanloop, files will be saved as:
+ ./humanloop/
├── my_project/
│ ├── prompts/
│ │ ├── my_prompt.prompt
@@ -207,12 +208,17 @@ def pull(
└── prompts/
└── other_prompt.prompt
+ \b
+ If you specify --local-files-directory=data/humanloop, files will be saved in ./data/humanloop/ instead.
+
If a file exists both locally and in the Humanloop workspace, the local file will be overwritten
with the version from Humanloop. Files that only exist locally will not be affected.
Currently only supports syncing Prompt and Agent files. Other file types will be skipped."""
client = get_client(api_key, env_file, base_url)
- sync_client = SyncClient(client, base_dir=base_dir, log_level=logging.DEBUG if verbose else logging.WARNING)
+ sync_client = SyncClient(
+ client, base_dir=local_files_directory, log_level=logging.DEBUG if verbose else logging.WARNING
+ )
click.echo(click.style("Pulling files from Humanloop...", fg=INFO_COLOR))
click.echo(click.style(f"Path: {path or '(root)'}", fg=INFO_COLOR))
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 6feb4abb..fce02a98 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -131,7 +131,13 @@ def __init__(
opentelemetry_tracer_provider: Optional tracer provider for telemetry
opentelemetry_tracer: Optional tracer for telemetry
use_local_files: Whether to use local files for prompts and agents
- local_files_directory: Directory for local files (default: "humanloop")
+ local_files_directory: Base directory where local prompt and agent files are stored (default: "humanloop").
+ This is relative to the current working directory. For example:
+ - "humanloop" will look for files in "./humanloop/"
+ - "data/humanloop" will look for files in "./data/humanloop/"
+ When using paths in the API, they must be relative to this directory. For example,
+ if local_files_directory="humanloop" and you have a file at "humanloop/samples/test.prompt",
+ you would reference it as "samples/test" in your code.
cache_size: Maximum number of files to cache when use_local_files is True (default: DEFAULT_CACHE_SIZE).
This parameter has no effect if use_local_files is False.
"""
@@ -389,40 +395,50 @@ def agent():
attributes=attributes,
)
- def pull(self, environment: str | None = None, path: str | None = None) -> Tuple[List[str], List[str]]:
+ def pull(self, path: str | None = None, environment: str | None = None) -> Tuple[List[str], List[str]]:
"""Pull Prompt and Agent files from Humanloop to local filesystem.
This method will:
1. Fetch Prompt and Agent files from your Humanloop workspace
- 2. Save them to the local filesystem using the client's files_directory (set during initialization)
+ 2. Save them to your local filesystem (directory specified by `local_files_directory`, default: "humanloop")
3. Maintain the same directory structure as in Humanloop
- 4. Add appropriate file extensions (.prompt or .agent)
+ 4. Add appropriate file extensions (`.prompt` or `.agent`)
The path parameter can be used in two ways:
- If it points to a specific file (e.g. "path/to/file.prompt" or "path/to/file.agent"), only that file will be pulled
- - If it points to a directory (e.g. "path/to/directory"), all Prompt and Agent files in that directory will be pulled
+ - If it points to a directory (e.g. "path/to/directory"), all Prompt and Agent files in that directory and its subdirectories will be pulled
- If no path is provided, all Prompt and Agent files will be pulled
The operation will overwrite existing files with the latest version from Humanloop
but will not delete local files that don't exist in the remote workspace.
- Currently only supports syncing prompt and agent files. Other file types will be skipped.
+ Currently only supports syncing Prompt and Agent files. Other file types will be skipped.
- The files will be saved with the following structure:
+ For example, with the default `local_files_directory="humanloop"`, files will be saved as:
```
- {files_directory}/
- ├── prompts/
- │ ├── my_prompt.prompt
- │ └── nested/
- │ └── another_prompt.prompt
- └── agents/
- └── my_agent.agent
+ ./humanloop/
+ ├── my_project/
+ │ ├── prompts/
+ │ │ ├── my_prompt.prompt
+ │ │ └── nested/
+ │ │ └── another_prompt.prompt
+ │ └── agents/
+ │ └── my_agent.agent
+ └── another_project/
+ └── prompts/
+ └── other_prompt.prompt
```
- :param environment: The environment to pull the files from.
+ If you specify `local_files_directory="data/humanloop"`, files will be saved in ./data/humanloop/ instead.
+
:param path: Optional path to either a specific file (e.g. "path/to/file.prompt") or a directory (e.g. "path/to/directory").
If not provided, all Prompt and Agent files will be pulled.
- :return: List of successfully processed file paths.
+ :param environment: The environment to pull the files from.
+ :return: Tuple of two lists:
+ - First list contains paths of successfully synced files
+ - Second list contains paths of files that failed to sync (due to API errors, missing content,
+ or filesystem issues)
+ :raises HumanloopRuntimeError: If there's an error communicating with the API
"""
return self._sync_client.pull(environment=environment, path=path)
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index f6f421c7..b8a7db09 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -163,8 +163,8 @@ def _normalize_path(self, path: str) -> str:
# Convert to Path object to handle platform-specific separators
path_obj = Path(path)
- # Paths are considered absolute on unix-like systems if they start with a forward slash.
- # This is because we want to ensure seamless toggling between the local and remote filesystems.
+ # Reject absolute paths to ensure all paths are relative to base_dir.
+ # This maintains consistency with the remote filesystem where paths are relative to project root.
if path_obj.is_absolute():
raise HumanloopRuntimeError(
f"Absolute paths are not supported: `{path}`. "
@@ -329,7 +329,7 @@ def pull(self, path: str | None = None, environment: str | None = None) -> Tuple
Returns:
Tuple of two lists:
- First list contains paths of successfully synced files
- - Second list contains paths of files that failed to sync
+ - Second list contains paths of files that failed to sync (e.g. failed to write to disk or missing raw content)
Raises:
HumanloopRuntimeError: If there's an error communicating with the API
From b1fb94f85130f423a213008c4a5478e753d51b38 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 13 May 2025 12:03:48 +0100
Subject: [PATCH 34/39] Fix type error in test
---
tests/custom/sync/test_client.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/custom/sync/test_client.py b/tests/custom/sync/test_client.py
index 37cec52b..f3ab7e83 100644
--- a/tests/custom/sync/test_client.py
+++ b/tests/custom/sync/test_client.py
@@ -2,7 +2,7 @@
import pytest
from pathlib import Path
from unittest.mock import Mock, patch
-from humanloop.sync.sync_client import SyncClient
+from humanloop.sync.sync_client import SyncClient, SerializableFileType
from humanloop.error import HumanloopRuntimeError
from typing import Literal
@@ -65,7 +65,7 @@ def test_save_and_read_file(sync_client: SyncClient):
# GIVEN a file content and path
content = "test content"
path = "test/path"
- file_type = "prompt"
+ file_type: SerializableFileType = "prompt"
# WHEN saving the file
sync_client._save_serialized_file(content, path, "prompt")
From efd06545687df1132b8a513629ff4ca6b89eb5e9 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 13 May 2025 12:39:50 +0100
Subject: [PATCH 35/39] Increase page size for listing files in pull
implementation from 10 to 100
---
src/humanloop/sync/sync_client.py | 3 ++-
tests/custom/sync/test_client.py | 15 ++++++---------
2 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index b8a7db09..97c4a28f 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -203,7 +203,7 @@ def _save_serialized_file(
logger.error(f"Failed to write {file_type} {file_path} to disk: {str(e)}")
raise
- def _pull_file(self, path: str | None = None, environment: str | None = None) -> bool:
+ def _pull_file(self, path: str, environment: str | None = None) -> bool:
"""Pull a specific file from Humanloop to local filesystem.
Returns:
@@ -262,6 +262,7 @@ def _pull_directory(
response = self.client.files.list_files(
type=list(self.SERIALIZABLE_FILE_TYPES),
page=page,
+ size=100,
include_raw_file_content=True,
environment=environment,
path=path,
diff --git a/tests/custom/sync/test_client.py b/tests/custom/sync/test_client.py
index f3ab7e83..a349fd0c 100644
--- a/tests/custom/sync/test_client.py
+++ b/tests/custom/sync/test_client.py
@@ -28,7 +28,7 @@ def test_init(sync_client: SyncClient, tmp_path: Path):
# THEN it should be initialized with correct base directory, cache size and file types
assert sync_client.base_dir == tmp_path
assert sync_client._cache_size == 10
- assert sync_client.SERIALIZABLE_FILE_TYPES == ["prompt", "agent"]
+ assert sync_client.SERIALIZABLE_FILE_TYPES == frozenset(["prompt", "agent"])
def test_normalize_path(sync_client: SyncClient):
@@ -37,7 +37,6 @@ def test_normalize_path(sync_client: SyncClient):
test_cases = [
("path/to/file.prompt", "path/to/file"),
("path\\to\\file.agent", "path/to/file"),
- ("/leading/slashes/file.prompt", "leading/slashes/file"),
("trailing/slashes/file.agent/", "trailing/slashes/file"),
("multiple//slashes//file.prompt", "multiple/slashes/file"),
]
@@ -48,6 +47,10 @@ def test_normalize_path(sync_client: SyncClient):
# THEN they should be converted to the expected format
assert normalized == expected
+ # Test absolute path raises error
+ with pytest.raises(HumanloopRuntimeError, match="Absolute paths are not supported"):
+ sync_client._normalize_path("/leading/slashes/file.prompt")
+
def test_is_file(sync_client: SyncClient):
"""Test file type detection."""
@@ -65,7 +68,7 @@ def test_save_and_read_file(sync_client: SyncClient):
# GIVEN a file content and path
content = "test content"
path = "test/path"
- file_type: SerializableFileType = "prompt"
+ file_type: SerializableFileType = "prompt"
# WHEN saving the file
sync_client._save_serialized_file(content, path, "prompt")
@@ -90,12 +93,6 @@ def test_error_handling(sync_client: SyncClient):
with pytest.raises(HumanloopRuntimeError, match="Local file not found"):
sync_client.get_file_content("nonexistent", "prompt")
- # GIVEN an invalid file type
- # WHEN trying to pull the file
- # THEN a ValueError should be raised
- with pytest.raises(ValueError, match="Unsupported file type"):
- sync_client._pull_file("test.txt")
-
# GIVEN an API error
# WHEN trying to pull a file
# THEN it should return False
From 6c2acf3d300b4ce83d4dcae41dbc8767b97958c4 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 13 May 2025 14:33:01 +0100
Subject: [PATCH 36/39] test: fix CLI tests
---
test.py | 0
tests/custom/integration/test_sync_cli.py | 16 ++++++----------
2 files changed, 6 insertions(+), 10 deletions(-)
create mode 100644 test.py
diff --git a/test.py b/test.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/custom/integration/test_sync_cli.py b/tests/custom/integration/test_sync_cli.py
index 41eaeede..3957aed2 100644
--- a/tests/custom/integration/test_sync_cli.py
+++ b/tests/custom/integration/test_sync_cli.py
@@ -24,7 +24,7 @@ def test_pull_without_api_key(cli_runner: CliRunner, no_humanloop_api_key_in_env
THEN it should fail with appropriate error message
"""
# WHEN running pull command
- result = cli_runner.invoke(cli, ["pull", "--base-dir", "humanloop"])
+ result = cli_runner.invoke(cli, ["pull", "--local-files-directory", "humanloop"])
# THEN it should fail with appropriate error message
assert result.exit_code == 1 # Our custom error code for API key issues
@@ -41,7 +41,7 @@ def test_pull_basic(
base_dir = str(tmp_path / "humanloop")
# WHEN running pull command
- result = cli_runner.invoke(cli, ["pull", "--base-dir", base_dir, "--verbose"])
+ result = cli_runner.invoke(cli, ["pull", "--local-files-directory", base_dir, "--verbose"])
# THEN it should succeed
assert result.exit_code == 0
@@ -77,9 +77,7 @@ def test_pull_with_specific_path(
] # Retrieve the prefix of the first file's path which corresponds to the sdk_test_dir used within syncable_files_fixture
# WHEN running pull command with path
- result = cli_runner.invoke(
- cli, ["pull", "--base-dir", base_dir, "--path", test_path, "--verbose", "--base-url", "http://localhost:80/v5"]
- )
+ result = cli_runner.invoke(cli, ["pull", "--local-files-directory", base_dir, "--path", test_path, "--verbose"])
# THEN it should succeed and show the path
assert result.exit_code == 0
@@ -109,13 +107,11 @@ def test_pull_with_environment(
cli,
[
"pull",
- "--base-dir",
+ "--local-files-directory",
base_dir,
"--environment",
environment,
"--verbose",
- "--base-url",
- "http://localhost:80/v5",
],
)
@@ -133,7 +129,7 @@ def test_pull_with_quiet_mode(
base_dir = str(tmp_path / "humanloop")
# WHEN running pull command with quiet mode
- result = cli_runner.invoke(cli, ["pull", "--base-dir", base_dir, "--quiet"])
+ result = cli_runner.invoke(cli, ["pull", "--local-files-directory", base_dir, "--quiet"])
# THEN it should succeed but not show file list
assert result.exit_code == 0
@@ -170,7 +166,7 @@ def test_pull_with_invalid_environment(cli_runner: CliRunner, tmp_path: Path):
cli,
[
"pull",
- "--base-dir",
+ "--local-files-directory",
base_dir,
"--environment",
environment,
From bd0a00f1c1c089e3129d7542feeecc03eba2309a Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 13 May 2025 16:42:34 +0100
Subject: [PATCH 37/39] fix type issues
---
src/humanloop/files/client.py | 3 +--
src/humanloop/overload.py | 8 ++++----
src/humanloop/sync/sync_client.py | 2 +-
3 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py
index 0f8c69fc..b4fd4274 100644
--- a/src/humanloop/files/client.py
+++ b/src/humanloop/files/client.py
@@ -118,8 +118,7 @@ def list_files(
def retrieve_by_path(
self,
*,
- # @TODO: @ale, can this be None?
- path: str | None = None,
+ path: str,
environment: typing.Optional[str] = None,
include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index ab0d6fcb..dc02488b 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -75,7 +75,7 @@ def _handle_tracing_context(kwargs: Dict[str, Any], client: Any) -> Dict[str, An
if "trace_parent_id" in kwargs:
logger.warning(
"Ignoring trace_parent_id argument at line %d: the Flow decorator manages tracing.",
- inspect.currentframe().f_lineno, # type: ignore [union-attr]
+ inspect.currentframe().f_lineno, # type: ignore[union-attr]
)
kwargs = {
**kwargs,
@@ -120,7 +120,7 @@ def _handle_local_files(
return kwargs
try:
- file_content = sync_client.get_file_content(normalized_path, file_type) # type: ignore [arg-type] file_type was checked above
+ file_content = sync_client.get_file_content(normalized_path, file_type) # type: ignore [arg-type]
kwargs[file_type] = file_content
except HumanloopRuntimeError as e:
raise HumanloopRuntimeError(f"Failed to use local file for `{normalized_path}`: {str(e)}")
@@ -191,7 +191,7 @@ def overload_client(
"""Overloads client methods to add tracing, local file handling, and evaluation context."""
# Store original log method as _log for all clients. Used in flow decorator
if hasattr(client, "log") and not hasattr(client, "_log"):
- client._log = client.log # type: ignore [attr-defined]
+ client._log = client.log # type: ignore[attr-defined]
# Create a closure to capture sync_client and use_local_files
def log_wrapper(self: Any, **kwargs) -> LogResponseType:
@@ -205,7 +205,7 @@ def log_wrapper(self: Any, **kwargs) -> LogResponseType:
logger.error("sync_client is None but client has call method and use_local_files=%s", use_local_files)
raise HumanloopRuntimeError("sync_client is required for clients that support call operations")
if hasattr(client, "call") and not hasattr(client, "_call"):
- client._call = client.call # type: ignore [attr-defined]
+ client._call = client.call # type: ignore[attr-defined]
# Create a closure to capture sync_client and use_local_files
def call_wrapper(self: Any, **kwargs) -> CallResponseType:
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
index 97c4a28f..d71f1568 100644
--- a/src/humanloop/sync/sync_client.py
+++ b/src/humanloop/sync/sync_client.py
@@ -343,7 +343,7 @@ def pull(self, path: str | None = None, environment: str | None = None) -> Tuple
)
try:
- if path is None:
+ if normalized_path is None or path is None: # path being None means normalized_path is None, but we check both for improved type safety
# Pull all files from the root
logger.debug("Pulling all files from root")
successful_files, failed_files = self._pull_directory(
From 76e8ac9b3a946e65a6006e3300851ce3a91649b1 Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 13 May 2025 16:43:46 +0100
Subject: [PATCH 38/39] Add comment explaining mypy ignore directive for
arg-type
---
src/humanloop/overload.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index dc02488b..92c83e6b 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -120,7 +120,7 @@ def _handle_local_files(
return kwargs
try:
- file_content = sync_client.get_file_content(normalized_path, file_type) # type: ignore [arg-type]
+ file_content = sync_client.get_file_content(normalized_path, file_type) # type: ignore[arg-type] # file_type was checked above
kwargs[file_type] = file_content
except HumanloopRuntimeError as e:
raise HumanloopRuntimeError(f"Failed to use local file for `{normalized_path}`: {str(e)}")
From 3fd8223543b83a0219f2e3555fc70b38e885dedf Mon Sep 17 00:00:00 2001
From: Ale Pouroullis
Date: Tue, 13 May 2025 18:33:24 +0100
Subject: [PATCH 39/39] Remove test file
---
test.py | 0
1 file changed, 0 insertions(+), 0 deletions(-)
delete mode 100644 test.py
diff --git a/test.py b/test.py
deleted file mode 100644
index e69de29b..00000000