diff --git a/.fernignore b/.fernignore
index 112f779b..fd7adc81 100644
--- a/.fernignore
+++ b/.fernignore
@@ -13,10 +13,13 @@ mypy.ini
README.md
src/humanloop/decorators
src/humanloop/otel
+src/humanloop/sync
+src/humanloop/cli
+pytest.ini
## Tests
-tests/
+tests/custom
## CI
diff --git a/.gitignore b/.gitignore
index a55ede77..f5cda9d9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,5 @@ poetry.toml
.env
tests/assets/*.jsonl
tests/assets/*.parquet
+# Ignore the humanloop directory: it's the default sync directory and could mistakenly be committed when testing sync functionality
+humanloop
diff --git a/poetry.lock b/poetry.lock
index cfe8a240..b105f29f 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
[[package]]
name = "annotated-types"
@@ -6,6 +6,7 @@ version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
@@ -13,13 +14,14 @@ files = [
[[package]]
name = "anthropic"
-version = "0.50.0"
+version = "0.51.0"
description = "The official Python library for the anthropic API"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
- {file = "anthropic-0.50.0-py3-none-any.whl", hash = "sha256:defbd79327ca2fa61fd7b9eb2f1627dfb1f69c25d49288c52e167ddb84574f80"},
- {file = "anthropic-0.50.0.tar.gz", hash = "sha256:42175ec04ce4ff2fa37cd436710206aadff546ee99d70d974699f59b49adc66f"},
+ {file = "anthropic-0.51.0-py3-none-any.whl", hash = "sha256:b8b47d482c9aa1f81b923555cebb687c2730309a20d01be554730c8302e0f62a"},
+ {file = "anthropic-0.51.0.tar.gz", hash = "sha256:6f824451277992af079554430d5b2c8ff5bc059cc2c968cdc3f06824437da201"},
]
[package.dependencies]
@@ -41,6 +43,7 @@ version = "4.9.0"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "anyio-4.9.0-py3-none-any.whl", hash = "sha256:9f76d541cad6e36af7beb62e978876f3b41e3e04f2c1fbf0884604c0a9c4d93c"},
{file = "anyio-4.9.0.tar.gz", hash = "sha256:673c0c244e15788651a4ff38710fea9675823028a6f08a5eda409e0c9840a028"},
@@ -54,7 +57,7 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""}
[package.extras]
doc = ["Sphinx (>=8.2,<9.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"]
-test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"]
+test = ["anyio[trio]", "blockbuster (>=1.5.23)", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\" and python_version < \"3.14\""]
trio = ["trio (>=0.26.1)"]
[[package]]
@@ -63,18 +66,19 @@ version = "25.3.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"},
{file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"},
]
[package.extras]
-benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"]
-tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
-tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"]
+tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
+tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""]
[[package]]
name = "certifi"
@@ -82,6 +86,7 @@ version = "2025.4.26"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
+groups = ["main", "dev"]
files = [
{file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"},
{file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"},
@@ -89,111 +94,128 @@ files = [
[[package]]
name = "charset-normalizer"
-version = "3.4.1"
+version = "3.4.2"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
+files = [
+ {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9cbfacf36cb0ec2897ce0ebc5d08ca44213af24265bd56eca54bee7923c48fd6"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18dd2e350387c87dabe711b86f83c9c78af772c748904d372ade190b5c7c9d4d"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8075c35cd58273fee266c58c0c9b670947c19df5fb98e7b66710e04ad4e9ff86"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5bf4545e3b962767e5c06fe1738f951f77d27967cb2caa64c28be7c4563e162c"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a6ab32f7210554a96cd9e33abe3ddd86732beeafc7a28e9955cdf22ffadbab0"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:b33de11b92e9f75a2b545d6e9b6f37e398d86c3e9e9653c4864eb7e89c5773ef"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:8755483f3c00d6c9a77f490c17e6ab0c8729e39e6390328e42521ef175380ae6"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:68a328e5f55ec37c57f19ebb1fdc56a248db2e3e9ad769919a58672958e8f366"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:21b2899062867b0e1fde9b724f8aecb1af14f2778d69aacd1a5a1853a597a5db"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-win32.whl", hash = "sha256:e8082b26888e2f8b36a042a58307d5b917ef2b1cacab921ad3323ef91901c71a"},
+ {file = "charset_normalizer-3.4.2-cp310-cp310-win_amd64.whl", hash = "sha256:f69a27e45c43520f5487f27627059b64aaf160415589230992cec34c5e18a509"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:be1e352acbe3c78727a16a455126d9ff83ea2dfdcbc83148d2982305a04714c2"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa88ca0b1932e93f2d961bf3addbb2db902198dca337d88c89e1559e066e7645"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d524ba3f1581b35c03cb42beebab4a13e6cdad7b36246bd22541fa585a56cccd"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28a1005facc94196e1fb3e82a3d442a9d9110b8434fc1ded7a24a2983c9888d8"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fdb20a30fe1175ecabed17cbf7812f7b804b8a315a25f24678bcdf120a90077f"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0f5d9ed7f254402c9e7d35d2f5972c9bbea9040e99cd2861bd77dc68263277c7"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:efd387a49825780ff861998cd959767800d54f8308936b21025326de4b5a42b9"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:f0aa37f3c979cf2546b73e8222bbfa3dc07a641585340179d768068e3455e544"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:e70e990b2137b29dc5564715de1e12701815dacc1d056308e2b17e9095372a82"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:0c8c57f84ccfc871a48a47321cfa49ae1df56cd1d965a09abe84066f6853b9c0"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6b66f92b17849b85cad91259efc341dce9c1af48e2173bf38a85c6329f1033e5"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-win32.whl", hash = "sha256:daac4765328a919a805fa5e2720f3e94767abd632ae410a9062dff5412bae65a"},
+ {file = "charset_normalizer-3.4.2-cp311-cp311-win_amd64.whl", hash = "sha256:e53efc7c7cee4c1e70661e2e112ca46a575f90ed9ae3fef200f2a25e954f4b28"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c"},
+ {file = "charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:926ca93accd5d36ccdabd803392ddc3e03e6d4cd1cf17deff3b989ab8e9dbcf0"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eba9904b0f38a143592d9fc0e19e2df0fa2e41c3c3745554761c5f6447eedabf"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3fddb7e2c84ac87ac3a947cb4e66d143ca5863ef48e4a5ecb83bd48619e4634e"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98f862da73774290f251b9df8d11161b6cf25b599a66baf087c1ffe340e9bfd1"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c9379d65defcab82d07b2a9dfbfc2e95bc8fe0ebb1b176a3190230a3ef0e07c"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e635b87f01ebc977342e2697d05b56632f5f879a4f15955dfe8cef2448b51691"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:1c95a1e2902a8b722868587c0e1184ad5c55631de5afc0eb96bc4b0d738092c0"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ef8de666d6179b009dce7bcb2ad4c4a779f113f12caf8dc77f0162c29d20490b"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:32fc0341d72e0f73f80acb0a2c94216bd704f4f0bce10aedea38f30502b271ff"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:289200a18fa698949d2b39c671c2cc7a24d44096784e76614899a7ccf2574b7b"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a476b06fbcf359ad25d34a057b7219281286ae2477cc5ff5e3f70a246971148"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-win32.whl", hash = "sha256:aaeeb6a479c7667fbe1099af9617c83aaca22182d6cf8c53966491a0f1b7ffb7"},
+ {file = "charset_normalizer-3.4.2-cp313-cp313-win_amd64.whl", hash = "sha256:aa6af9e7d59f9c12b33ae4e9450619cf2488e2bbe9b44030905877f0b2324980"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1cad5f45b3146325bb38d6855642f6fd609c3f7cad4dbaf75549bf3b904d3184"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b2680962a4848b3c4f155dc2ee64505a9c57186d0d56b43123b17ca3de18f0fa"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:36b31da18b8890a76ec181c3cf44326bf2c48e36d393ca1b72b3f484113ea344"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f4074c5a429281bf056ddd4c5d3b740ebca4d43ffffe2ef4bf4d2d05114299da"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9e36a97bee9b86ef9a1cf7bb96747eb7a15c2f22bdb5b516434b00f2a599f02"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:1b1bde144d98e446b056ef98e59c256e9294f6b74d7af6846bf5ffdafd687a7d"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:915f3849a011c1f593ab99092f3cecfcb4d65d8feb4a64cf1bf2d22074dc0ec4"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:fb707f3e15060adf5b7ada797624a6c6e0138e2a26baa089df64c68ee98e040f"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:25a23ea5c7edc53e0f29bae2c44fcb5a1aa10591aae107f2a2b2583a9c5cbc64"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:770cab594ecf99ae64c236bc9ee3439c3f46be49796e265ce0cc8bc17b10294f"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-win32.whl", hash = "sha256:6a0289e4589e8bdfef02a80478f1dfcb14f0ab696b5a00e1f4b8a14a307a3c58"},
+ {file = "charset_normalizer-3.4.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6fc1f5b51fa4cecaa18f2bd7a003f3dd039dd615cd69a2afd6d3b19aed6775f2"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:76af085e67e56c8816c3ccf256ebd136def2ed9654525348cfa744b6802b69eb"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e45ba65510e2647721e35323d6ef54c7974959f6081b58d4ef5d87c60c84919a"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:046595208aae0120559a67693ecc65dd75d46f7bf687f159127046628178dc45"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75d10d37a47afee94919c4fab4c22b9bc2a8bf7d4f46f87363bcf0573f3ff4f5"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6333b3aa5a12c26b2a4d4e7335a28f1475e0e5e17d69d55141ee3cab736f66d1"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e8323a9b031aa0393768b87f04b4164a40037fb2a3c11ac06a03ffecd3618027"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:24498ba8ed6c2e0b56d4acbf83f2d989720a93b41d712ebd4f4979660db4417b"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:844da2b5728b5ce0e32d863af26f32b5ce61bc4273a9c720a9f3aa9df73b1455"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:65c981bdbd3f57670af8b59777cbfae75364b483fa8a9f420f08094531d54a01"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:3c21d4fca343c805a52c0c78edc01e3477f6dd1ad7c47653241cf2a206d4fc58"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:dc7039885fa1baf9be153a0626e337aa7ec8bf96b0128605fb0d77788ddc1681"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-win32.whl", hash = "sha256:8272b73e1c5603666618805fe821edba66892e2870058c94c53147602eab29c7"},
+ {file = "charset_normalizer-3.4.2-cp38-cp38-win_amd64.whl", hash = "sha256:70f7172939fdf8790425ba31915bfbe8335030f05b9913d7ae00a87d4395620a"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:005fa3432484527f9732ebd315da8da8001593e2cf46a3d817669f062c3d9ed4"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e92fca20c46e9f5e1bb485887d074918b13543b1c2a1185e69bb8d17ab6236a7"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:50bf98d5e563b83cc29471fa114366e6806bc06bc7a25fd59641e41445327836"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:721c76e84fe669be19c5791da68232ca2e05ba5185575086e384352e2c309597"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82d8fd25b7f4675d0c47cf95b594d4e7b158aca33b76aa63d07186e13c0e0ab7"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b3daeac64d5b371dea99714f08ffc2c208522ec6b06fbc7866a450dd446f5c0f"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:dccab8d5fa1ef9bfba0590ecf4d46df048d18ffe3eec01eeb73a42e0d9e7a8ba"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:aaf27faa992bfee0264dc1f03f4c75e9fcdda66a519db6b957a3f826e285cf12"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:eb30abc20df9ab0814b5a2524f23d75dcf83cde762c161917a2b4b7b55b1e518"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:c72fbbe68c6f32f251bdc08b8611c7b3060612236e960ef848e0a517ddbe76c5"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:982bb1e8b4ffda883b3d0a521e23abcd6fd17418f6d2c4118d257a10199c0ce3"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-win32.whl", hash = "sha256:43e0933a0eff183ee85833f341ec567c0980dae57c464d8a508e1b2ceb336471"},
+ {file = "charset_normalizer-3.4.2-cp39-cp39-win_amd64.whl", hash = "sha256:d11b54acf878eef558599658b0ffca78138c8c3655cf4f3a4a673c437e67732e"},
+ {file = "charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0"},
+ {file = "charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63"},
+]
+
+[[package]]
+name = "click"
+version = "8.1.8"
+description = "Composable command line interface toolkit"
+optional = false
+python-versions = ">=3.7"
+groups = ["main"]
files = [
- {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"},
- {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"},
- {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"},
- {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"},
- {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"},
- {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"},
- {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"},
- {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"},
- {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"},
- {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"},
+ {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"},
+ {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"},
]
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
[[package]]
name = "cohere"
version = "5.15.0"
description = ""
optional = false
python-versions = "<4.0,>=3.9"
+groups = ["dev"]
files = [
{file = "cohere-5.15.0-py3-none-any.whl", hash = "sha256:22ff867c2a6f2fc2b585360c6072f584f11f275ef6d9242bac24e0fa2df1dfb5"},
{file = "cohere-5.15.0.tar.gz", hash = "sha256:e802d4718ddb0bb655654382ebbce002756a3800faac30296cde7f1bdc6ff2cc"},
@@ -216,10 +238,12 @@ version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+groups = ["main", "dev"]
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
+markers = {main = "platform_system == \"Windows\"", dev = "platform_system == \"Windows\" or sys_platform == \"win32\""}
[[package]]
name = "deepdiff"
@@ -227,6 +251,7 @@ version = "8.4.2"
description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "deepdiff-8.4.2-py3-none-any.whl", hash = "sha256:7e39e5b26f3747c54f9d0e8b9b29daab670c3100166b77cc0185d5793121b099"},
{file = "deepdiff-8.4.2.tar.gz", hash = "sha256:5c741c0867ebc7fcb83950ad5ed958369c17f424e14dee32a11c56073f4ee92a"},
@@ -245,6 +270,7 @@ version = "1.2.18"
description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
+groups = ["main"]
files = [
{file = "Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec"},
{file = "deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d"},
@@ -254,7 +280,7 @@ files = [
wrapt = ">=1.10,<2"
[package.extras]
-dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools", "tox"]
+dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"]
[[package]]
name = "distro"
@@ -262,6 +288,7 @@ version = "1.9.0"
description = "Distro - an OS platform information API"
optional = false
python-versions = ">=3.6"
+groups = ["main", "dev"]
files = [
{file = "distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2"},
{file = "distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed"},
@@ -273,6 +300,8 @@ version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
+markers = "python_version < \"3.11\""
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
@@ -281,12 +310,28 @@ files = [
[package.extras]
test = ["pytest (>=6)"]
+[[package]]
+name = "execnet"
+version = "2.1.1"
+description = "execnet: rapid multi-Python deployment"
+optional = false
+python-versions = ">=3.8"
+groups = ["dev"]
+files = [
+ {file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"},
+ {file = "execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"},
+]
+
+[package.extras]
+testing = ["hatch", "pre-commit", "pytest", "tox"]
+
[[package]]
name = "fastavro"
version = "1.10.0"
description = "Fast read/write of AVRO files"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "fastavro-1.10.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1a9fe0672d2caf0fe54e3be659b13de3cad25a267f2073d6f4b9f8862acc31eb"},
{file = "fastavro-1.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86dd0410770e0c99363788f0584523709d85e57bb457372ec5c285a482c17fe6"},
@@ -333,6 +378,7 @@ version = "3.18.0"
description = "A platform independent file lock."
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de"},
{file = "filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2"},
@@ -341,7 +387,7 @@ files = [
[package.extras]
docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"]
testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"]
-typing = ["typing-extensions (>=4.12.2)"]
+typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""]
[[package]]
name = "fsspec"
@@ -349,6 +395,7 @@ version = "2025.3.2"
description = "File-system specification"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "fsspec-2025.3.2-py3-none-any.whl", hash = "sha256:2daf8dc3d1dfa65b6aa37748d112773a7a08416f6c70d96b264c96476ecaf711"},
{file = "fsspec-2025.3.2.tar.gz", hash = "sha256:e52c77ef398680bbd6a98c0e628fbc469491282981209907bbc8aea76a04fdc6"},
@@ -384,13 +431,14 @@ tqdm = ["tqdm"]
[[package]]
name = "groq"
-version = "0.23.1"
+version = "0.24.0"
description = "The official Python library for the groq API"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
- {file = "groq-0.23.1-py3-none-any.whl", hash = "sha256:05fa38c3d0ad03c19c6185f98f6a73901c2a463e844fd067b79f7b05c8346946"},
- {file = "groq-0.23.1.tar.gz", hash = "sha256:952e34895f9bfb78ab479e495d77b32180262e5c42f531ce3a1722d6e5a04dfb"},
+ {file = "groq-0.24.0-py3-none-any.whl", hash = "sha256:0020e6b0b2b267263c9eb7c318deef13c12f399c6525734200b11d777b00088e"},
+ {file = "groq-0.24.0.tar.gz", hash = "sha256:e821559de8a77fb81d2585b3faec80ff923d6d64fd52339b33f6c94997d6f7f5"},
]
[package.dependencies]
@@ -407,17 +455,41 @@ version = "0.16.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"},
{file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
]
+[[package]]
+name = "hf-xet"
+version = "1.1.0"
+description = ""
+optional = false
+python-versions = ">=3.8"
+groups = ["main", "dev"]
+markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\""
+files = [
+ {file = "hf_xet-1.1.0-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:0322c42551e275fcb7949c083a54a81b2898e50787c9aa74284fcb8d2c58c12c"},
+ {file = "hf_xet-1.1.0-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:667153a0304ac2debf2af95a8ff7687186f885b493f4cd16344869af270cd110"},
+ {file = "hf_xet-1.1.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:995eeffb119636ea617b96c7d7bf3c3f5ea8727fa57974574e25d700b8532d48"},
+ {file = "hf_xet-1.1.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3aee847da362393331f515c4010d0aaa1c2669acfcca1f4b28946d6949cc0086"},
+ {file = "hf_xet-1.1.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:68c5813a6074aa36e12ef5983230e3b03148cce61e0fcdd294096493795565b4"},
+ {file = "hf_xet-1.1.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:4ee9222bf9274b1c198b88a929de0b5a49349c4962d89c5b3b2f0f7f47d9761c"},
+ {file = "hf_xet-1.1.0-cp37-abi3-win_amd64.whl", hash = "sha256:73153eab9abf3d6973b21e94a67ccba5d595c3e12feb8c0bf50be02964e7f126"},
+ {file = "hf_xet-1.1.0.tar.gz", hash = "sha256:a7c2a4c2b6eee9ce0a1a367a82b60d95ba634420ef1c250addad7aa4af419cf4"},
+]
+
+[package.extras]
+tests = ["pytest"]
+
[[package]]
name = "httpcore"
version = "1.0.9"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"},
{file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"},
@@ -439,6 +511,7 @@ version = "0.28.1"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"},
{file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"},
@@ -451,7 +524,7 @@ httpcore = "==1.*"
idna = "*"
[package.extras]
-brotli = ["brotli", "brotlicffi"]
+brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
@@ -463,6 +536,7 @@ version = "0.4.0"
description = "Consume Server-Sent Event (SSE) messages with HTTPX."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "httpx-sse-0.4.0.tar.gz", hash = "sha256:1e81a3a3070ce322add1d3529ed42eb5f70817f45ed6ec915ab753f961139721"},
{file = "httpx_sse-0.4.0-py3-none-any.whl", hash = "sha256:f329af6eae57eaa2bdfd962b42524764af68075ea87370a2de920af5341e318f"},
@@ -470,18 +544,20 @@ files = [
[[package]]
name = "huggingface-hub"
-version = "0.30.2"
+version = "0.31.1"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.8.0"
+groups = ["main", "dev"]
files = [
- {file = "huggingface_hub-0.30.2-py3-none-any.whl", hash = "sha256:68ff05969927058cfa41df4f2155d4bb48f5f54f719dd0390103eefa9b191e28"},
- {file = "huggingface_hub-0.30.2.tar.gz", hash = "sha256:9a7897c5b6fd9dad3168a794a8998d6378210f5b9688d0dfc180b1a228dc2466"},
+ {file = "huggingface_hub-0.31.1-py3-none-any.whl", hash = "sha256:43f73124819b48b42d140cbc0d7a2e6bd15b2853b1b9d728d4d55ad1750cac5b"},
+ {file = "huggingface_hub-0.31.1.tar.gz", hash = "sha256:492bb5f545337aa9e2f59b75ef4c5f535a371e8958a6ce90af056387e67f1180"},
]
[package.dependencies]
filelock = "*"
fsspec = ">=2023.5.0"
+hf-xet = {version = ">=1.1.0,<2.0.0", markers = "platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"arm64\" or platform_machine == \"aarch64\""}
packaging = ">=20.9"
pyyaml = ">=5.1"
requests = "*"
@@ -494,7 +570,7 @@ cli = ["InquirerPy (==0.3.4)"]
dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "fastapi", "gradio (>=4.0.0)", "jedi", "libcst (==1.4.0)", "mypy (==1.5.1)", "numpy", "pytest (>=8.1.1,<8.2.2)", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-mock", "pytest-rerunfailures", "pytest-vcr", "pytest-xdist", "ruff (>=0.9.0)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
hf-transfer = ["hf-transfer (>=0.1.4)"]
-hf-xet = ["hf-xet (>=0.1.4)"]
+hf-xet = ["hf-xet (>=1.1.0,<2.0.0)"]
inference = ["aiohttp"]
quality = ["libcst (==1.4.0)", "mypy (==1.5.1)", "ruff (>=0.9.0)"]
tensorflow = ["graphviz", "pydot", "tensorflow"]
@@ -509,6 +585,7 @@ version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
+groups = ["main", "dev"]
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
@@ -523,6 +600,7 @@ version = "8.6.1"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "importlib_metadata-8.6.1-py3-none-any.whl", hash = "sha256:02a89390c1e15fdfdc0d7c6b25cb3e62650d0494005c97d6f148bf5b9787525e"},
{file = "importlib_metadata-8.6.1.tar.gz", hash = "sha256:310b41d755445d74569f993ccfc22838295d9fe005425094fad953d7f15c8580"},
@@ -532,12 +610,12 @@ files = [
zipp = ">=3.20"
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
perf = ["ipython"]
-test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
+test = ["flufl.flake8", "importlib_resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
type = ["pytest-mypy"]
[[package]]
@@ -546,6 +624,7 @@ version = "2.1.0"
description = "brain-dead simple config-ini parsing"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"},
{file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"},
@@ -557,6 +636,7 @@ version = "0.9.0"
description = "Fast iterable JSON parser."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "jiter-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:816ec9b60fdfd1fec87da1d7ed46c66c44ffec37ab2ef7de5b147b2fce3fd5ad"},
{file = "jiter-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b1d3086f8a3ee0194ecf2008cf81286a5c3e540d977fa038ff23576c023c0ea"},
@@ -642,6 +722,7 @@ version = "4.23.0"
description = "An implementation of JSON Schema validation for Python"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "jsonschema-4.23.0-py3-none-any.whl", hash = "sha256:fbadb6f8b144a8f8cf9f0b89ba94501d143e50411a1278633f56a7acf7fd5566"},
{file = "jsonschema-4.23.0.tar.gz", hash = "sha256:d71497fef26351a33265337fa77ffeb82423f3ea21283cd9467bb03999266bc4"},
@@ -663,6 +744,7 @@ version = "2025.4.1"
description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af"},
{file = "jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608"},
@@ -677,6 +759,7 @@ version = "5.1.0"
description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions."
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:eaf4ac5c6ee18ca9232238364d7f2a213278ae5ca97897cafaa123fcc7bb8bec"},
{file = "mmh3-5.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:48f9aa8ccb9ad1d577a16104834ac44ff640d8de8c0caed09a2300df7ce8460a"},
@@ -775,6 +858,7 @@ version = "1.0.1"
description = "Optional static typing for Python"
optional = false
python-versions = ">=3.7"
+groups = ["dev"]
files = [
{file = "mypy-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:71a808334d3f41ef011faa5a5cd8153606df5fc0b56de5b2e89566c8093a0c9a"},
{file = "mypy-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:920169f0184215eef19294fa86ea49ffd4635dedfdea2b57e45cb4ee85d5ccaf"},
@@ -821,6 +905,7 @@ version = "1.1.0"
description = "Type system extensions for programs checked with the mypy type checker."
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"},
{file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"},
@@ -832,6 +917,7 @@ version = "1.26.4"
description = "Fundamental package for array computing in Python"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"},
{file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"},
@@ -873,13 +959,14 @@ files = [
[[package]]
name = "openai"
-version = "1.76.2"
+version = "1.77.0"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
- {file = "openai-1.76.2-py3-none-any.whl", hash = "sha256:9c1d9ad59e6e3bea7205eedc9ca66eeebae18d47b527e505a2b0d2fb1538e26e"},
- {file = "openai-1.76.2.tar.gz", hash = "sha256:f430c8b848775907405c6eff54621254c96f6444c593c097e0cc3a9f8fdda96f"},
+ {file = "openai-1.77.0-py3-none-any.whl", hash = "sha256:07706e91eb71631234996989a8ea991d5ee56f0744ef694c961e0824d4f39218"},
+ {file = "openai-1.77.0.tar.gz", hash = "sha256:897969f927f0068b8091b4b041d1f8175bcf124f7ea31bab418bf720971223bc"},
]
[package.dependencies]
@@ -903,6 +990,7 @@ version = "1.32.1"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_api-1.32.1-py3-none-any.whl", hash = "sha256:bbd19f14ab9f15f0e85e43e6a958aa4cb1f36870ee62b7fd205783a112012724"},
{file = "opentelemetry_api-1.32.1.tar.gz", hash = "sha256:a5be71591694a4d9195caf6776b055aa702e964d961051a0715d05f8632c32fb"},
@@ -918,6 +1006,7 @@ version = "0.53b1"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_instrumentation-0.53b1-py3-none-any.whl", hash = "sha256:c07850cecfbc51e8b357f56d5886ae5ccaa828635b220d0f5e78f941ea9a83ca"},
{file = "opentelemetry_instrumentation-0.53b1.tar.gz", hash = "sha256:0e69ca2c75727e8a300de671c4a2ec0e86e63a8e906beaa5d6c9f5228e8687e5"},
@@ -931,13 +1020,14 @@ wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-anthropic"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry Anthropic instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_anthropic-0.40.2-py3-none-any.whl", hash = "sha256:94e3474dfcb65ada10a5d83056e9e43dc0afbaae43a55bba6b7712672e28d21a"},
- {file = "opentelemetry_instrumentation_anthropic-0.40.2.tar.gz", hash = "sha256:949156556ed4d908196984fac1a8ea3d16edcf9d7395d85729a0e7712b2f818f"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.3-py3-none-any.whl", hash = "sha256:152a7968d86ade48ffb4df526129def598e8e3eeb1c4fb11a5e6a3bbc94c0fd4"},
+ {file = "opentelemetry_instrumentation_anthropic-0.40.3.tar.gz", hash = "sha256:5e40a9d3342d800180d29e028f3d1aa5db3e0ec482362a7eef054ee500fb8d4f"},
]
[package.dependencies]
@@ -948,13 +1038,14 @@ opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-bedrock"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry Bedrock instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_bedrock-0.40.2-py3-none-any.whl", hash = "sha256:a12331e2cd77eb61f954acbaa50cdf31954f2b315b52da6354284ce0b83f2773"},
- {file = "opentelemetry_instrumentation_bedrock-0.40.2.tar.gz", hash = "sha256:a1d49d41d8435ba368698a884ffbd4fbda1f1325d6961b805706ee0bbbc6547f"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.3-py3-none-any.whl", hash = "sha256:cc8ea0358f57876ad12bbcbc9b1ace4b97bf9d8d5d7703a7a55135811b0b433a"},
+ {file = "opentelemetry_instrumentation_bedrock-0.40.3.tar.gz", hash = "sha256:bcb5060060d0ec25bd8c08332eadd23d57ceea1e042fed5e7a7664e5a2fcb817"},
]
[package.dependencies]
@@ -967,13 +1058,14 @@ tokenizers = ">=0.13.0"
[[package]]
name = "opentelemetry-instrumentation-cohere"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry Cohere instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_cohere-0.40.2-py3-none-any.whl", hash = "sha256:96fde68b0d8ce68f272f4c54f30178cb22cbadb196735a3943cc328891a9d508"},
- {file = "opentelemetry_instrumentation_cohere-0.40.2.tar.gz", hash = "sha256:df3cac041b0769540f2362d8280e7f0179ff1446e47fb2542f22d91822c30fc4"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.3-py3-none-any.whl", hash = "sha256:d39d058ae5cffe02908c1c242b71dc449d07df71c60d7a37159a898e38c6a15c"},
+ {file = "opentelemetry_instrumentation_cohere-0.40.3.tar.gz", hash = "sha256:23f6f237f7cdef661549b3f7d02dc8b1c69ce0cbf3e0050c05ce3f443458fe35"},
]
[package.dependencies]
@@ -984,13 +1076,14 @@ opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-groq"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry Groq instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_groq-0.40.2-py3-none-any.whl", hash = "sha256:32e9220439b8356f33edbafbfd8b7f4ea063c1465ff29389abefcc93eca19530"},
- {file = "opentelemetry_instrumentation_groq-0.40.2.tar.gz", hash = "sha256:c127d089a5aec9f49ed9ba6bdbd00d67af596040a778eaef3641cd18d114ae93"},
+ {file = "opentelemetry_instrumentation_groq-0.40.3-py3-none-any.whl", hash = "sha256:3c3c324ab0b49323f268dc54b60fe06aaee04b5bac0f901a70251d4931b611bc"},
+ {file = "opentelemetry_instrumentation_groq-0.40.3.tar.gz", hash = "sha256:b246b258d28ac5af429688b9948c37ced88cba4a7f8f99f629f9b42fcbe36e47"},
]
[package.dependencies]
@@ -1001,13 +1094,14 @@ opentelemetry-semantic-conventions-ai = "0.4.5"
[[package]]
name = "opentelemetry-instrumentation-openai"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry OpenAI instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_openai-0.40.2-py3-none-any.whl", hash = "sha256:62fe130f16f2933f1db75f9a14807bb08444534fd8d2e6ad4668ee8b1c3968a5"},
- {file = "opentelemetry_instrumentation_openai-0.40.2.tar.gz", hash = "sha256:61e46e7a9e3f5d7fb0cef82f1fd7bd6a26848a28ec384249875fe5622ddbf622"},
+ {file = "opentelemetry_instrumentation_openai-0.40.3-py3-none-any.whl", hash = "sha256:77e55609fef78d1a81a61aeac667b6423d19f3f3936c4a219963fba0559dae44"},
+ {file = "opentelemetry_instrumentation_openai-0.40.3.tar.gz", hash = "sha256:8e7f260f3c3e25f445281238552c80cbd724c2d61fee4ad9360a86a2e0015114"},
]
[package.dependencies]
@@ -1019,13 +1113,14 @@ tiktoken = ">=0.6.0,<1"
[[package]]
name = "opentelemetry-instrumentation-replicate"
-version = "0.40.2"
+version = "0.40.3"
description = "OpenTelemetry Replicate instrumentation"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
- {file = "opentelemetry_instrumentation_replicate-0.40.2-py3-none-any.whl", hash = "sha256:ab6234081ae9803981e8e6302524bd25fc3d0e38e9a939bee6ad15f85405ccb8"},
- {file = "opentelemetry_instrumentation_replicate-0.40.2.tar.gz", hash = "sha256:e7edf785c07e94c951f8268ff1204e00b1fcc86059b3475ac04e01b74f9785c6"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.3-py3-none-any.whl", hash = "sha256:24a3ae27137521a4f4cfa523c36be7cedf2ad57eed9fef5f1bfb2a3e2c8aee9e"},
+ {file = "opentelemetry_instrumentation_replicate-0.40.3.tar.gz", hash = "sha256:b62dcee6b8afe6dc30ad98b18014fdd1b6851fc5bd6d0c4dc6c40bff16ce407d"},
]
[package.dependencies]
@@ -1040,6 +1135,7 @@ version = "1.32.1"
description = "OpenTelemetry Python Proto"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_proto-1.32.1-py3-none-any.whl", hash = "sha256:fe56df31033ab0c40af7525f8bf4c487313377bbcfdf94184b701a8ccebc800e"},
{file = "opentelemetry_proto-1.32.1.tar.gz", hash = "sha256:bc6385ccf87768f029371535312071a2d09e6c9ebf119ac17dbc825a6a56ba53"},
@@ -1054,6 +1150,7 @@ version = "1.32.1"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_sdk-1.32.1-py3-none-any.whl", hash = "sha256:bba37b70a08038613247bc42beee5a81b0ddca422c7d7f1b097b32bf1c7e2f17"},
{file = "opentelemetry_sdk-1.32.1.tar.gz", hash = "sha256:8ef373d490961848f525255a42b193430a0637e064dd132fd2a014d94792a092"},
@@ -1070,6 +1167,7 @@ version = "0.53b1"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "opentelemetry_semantic_conventions-0.53b1-py3-none-any.whl", hash = "sha256:21df3ed13f035f8f3ea42d07cbebae37020367a53b47f1ebee3b10a381a00208"},
{file = "opentelemetry_semantic_conventions-0.53b1.tar.gz", hash = "sha256:4c5a6fede9de61211b2e9fc1e02e8acacce882204cd770177342b6a3be682992"},
@@ -1085,6 +1183,7 @@ version = "0.4.5"
description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
optional = false
python-versions = "<4,>=3.9"
+groups = ["main"]
files = [
{file = "opentelemetry_semantic_conventions_ai-0.4.5-py3-none-any.whl", hash = "sha256:91e5c776d45190cebd88ea1cef021e231b5c04c448f5473fdaeb310f14e62b11"},
{file = "opentelemetry_semantic_conventions_ai-0.4.5.tar.gz", hash = "sha256:15e2540aa807fb6748f1bdc60da933ee2fb2e40f6dec48fde8facfd9e22550d7"},
@@ -1092,13 +1191,14 @@ files = [
[[package]]
name = "orderly-set"
-version = "5.4.0"
+version = "5.4.1"
description = "Orderly set"
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
- {file = "orderly_set-5.4.0-py3-none-any.whl", hash = "sha256:f0192a7f9ae3385b587b71688353fae491d1ca45878496eb71ea118be1623639"},
- {file = "orderly_set-5.4.0.tar.gz", hash = "sha256:c8ff5ba824abe4eebcbbdd3f646ff3648ad0dd52239319d90056d8d30b6cccdd"},
+ {file = "orderly_set-5.4.1-py3-none-any.whl", hash = "sha256:b5e21d21680bd9ef456885db800c5cb4f76a03879880c0175e1b077fb166fd83"},
+ {file = "orderly_set-5.4.1.tar.gz", hash = "sha256:a1fb5a4fdc5e234e9e8d8e5c1bbdbc4540f4dfe50d12bf17c8bc5dbf1c9c878d"},
]
[[package]]
@@ -1107,6 +1207,7 @@ version = "25.0"
description = "Core utilities for Python packages"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"},
{file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"},
@@ -1118,6 +1219,7 @@ version = "2.2.3"
description = "Powerful data structures for data analysis, time series, and statistics"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "pandas-2.2.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1948ddde24197a0f7add2bdc4ca83bf2b1ef84a1bc8ccffd95eda17fd836ecb5"},
{file = "pandas-2.2.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:381175499d3802cde0eabbaf6324cce0c4f5d52ca6f8c377c29ad442f50f6348"},
@@ -1204,6 +1306,7 @@ version = "1.20.2"
description = "parse() is the opposite of format()"
optional = false
python-versions = "*"
+groups = ["main", "dev"]
files = [
{file = "parse-1.20.2-py2.py3-none-any.whl", hash = "sha256:967095588cb802add9177d0c0b6133b5ba33b1ea9007ca800e526f42a85af558"},
{file = "parse-1.20.2.tar.gz", hash = "sha256:b41d604d16503c79d81af5165155c0b20f6c8d6c559efa66b4b695c3e5a0a0ce"},
@@ -1215,6 +1318,7 @@ version = "0.6.4"
description = "Simplifies to build parse types based on the parse module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,>=2.7"
+groups = ["dev"]
files = [
{file = "parse_type-0.6.4-py2.py3-none-any.whl", hash = "sha256:83d41144a82d6b8541127bf212dd76c7f01baff680b498ce8a4d052a7a5bce4c"},
{file = "parse_type-0.6.4.tar.gz", hash = "sha256:5e1ec10440b000c3f818006033372939e693a9ec0176f446d9303e4db88489a6"},
@@ -1225,9 +1329,9 @@ parse = {version = ">=1.18.0", markers = "python_version >= \"3.0\""}
six = ">=1.15"
[package.extras]
-develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0)", "pytest (>=5.0)", "pytest-cov", "pytest-html (>=1.19.0)", "ruff", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0)", "virtualenv (>=20.0.0)", "wheel"]
+develop = ["build (>=0.5.1)", "coverage (>=4.4)", "pylint", "pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-cov", "pytest-html (>=1.19.0)", "ruff ; python_version >= \"3.7\"", "setuptools", "setuptools-scm", "tox (>=2.8,<4.0)", "twine (>=1.13.0)", "virtualenv (<20.22.0) ; python_version <= \"3.6\"", "virtualenv (>=20.0.0) ; python_version > \"3.6\"", "wheel"]
docs = ["Sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6.0)"]
-testing = ["pytest (<5.0)", "pytest (>=5.0)", "pytest-html (>=1.19.0)"]
+testing = ["pytest (<5.0) ; python_version < \"3.0\"", "pytest (>=5.0) ; python_version >= \"3.0\"", "pytest-html (>=1.19.0)"]
[[package]]
name = "pluggy"
@@ -1235,6 +1339,7 @@ version = "1.5.0"
description = "plugin and hook calling mechanisms for python"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"},
{file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"},
@@ -1250,6 +1355,7 @@ version = "5.29.4"
description = ""
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "protobuf-5.29.4-cp310-abi3-win32.whl", hash = "sha256:13eb236f8eb9ec34e63fc8b1d6efd2777d062fa6aaa68268fb67cf77f6839ad7"},
{file = "protobuf-5.29.4-cp310-abi3-win_amd64.whl", hash = "sha256:bcefcdf3976233f8a502d265eb65ea740c989bacc6c30a58290ed0e519eb4b8d"},
@@ -1270,6 +1376,7 @@ version = "19.0.1"
description = "Python library for Apache Arrow"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:fc28912a2dc924dddc2087679cc8b7263accc71b9ff025a1362b004711661a69"},
{file = "pyarrow-19.0.1-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:fca15aabbe9b8355800d923cc2e82c8ef514af321e18b437c3d782aa884eaeec"},
@@ -1324,6 +1431,7 @@ version = "2.11.4"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "pydantic-2.11.4-py3-none-any.whl", hash = "sha256:d9615eaa9ac5a063471da949c8fc16376a84afb5024688b3ff885693506764eb"},
{file = "pydantic-2.11.4.tar.gz", hash = "sha256:32738d19d63a226a52eed76645a98ee07c1f410ee41d93b4afbfa85ed8111c2d"},
@@ -1337,7 +1445,7 @@ typing-inspection = ">=0.4.0"
[package.extras]
email = ["email-validator (>=2.0.0)"]
-timezone = ["tzdata"]
+timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""]
[[package]]
name = "pydantic-core"
@@ -1345,6 +1453,7 @@ version = "2.33.2"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"},
{file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"},
@@ -1456,6 +1565,7 @@ version = "7.4.4"
description = "pytest: simple powerful testing with Python"
optional = false
python-versions = ">=3.7"
+groups = ["dev"]
files = [
{file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
{file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
@@ -1478,6 +1588,7 @@ version = "0.23.8"
description = "Pytest support for asyncio"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "pytest_asyncio-0.23.8-py3-none-any.whl", hash = "sha256:50265d892689a5faefb84df80819d1ecef566eb3549cf915dfb33569359d1ce2"},
{file = "pytest_asyncio-0.23.8.tar.gz", hash = "sha256:759b10b33a6dc61cce40a8bd5205e302978bbbcc00e279a8b61d9a6a3c82e4d3"},
@@ -1496,6 +1607,7 @@ version = "1.7.0"
description = "Adds the ability to retry flaky tests in CI environments"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "pytest_retry-1.7.0-py3-none-any.whl", hash = "sha256:a2dac85b79a4e2375943f1429479c65beb6c69553e7dae6b8332be47a60954f4"},
{file = "pytest_retry-1.7.0.tar.gz", hash = "sha256:f8d52339f01e949df47c11ba9ee8d5b362f5824dff580d3870ec9ae0057df80f"},
@@ -1507,12 +1619,34 @@ pytest = ">=7.0.0"
[package.extras]
dev = ["black", "flake8", "isort", "mypy"]
+[[package]]
+name = "pytest-xdist"
+version = "3.6.1"
+description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
+optional = false
+python-versions = ">=3.8"
+groups = ["dev"]
+files = [
+ {file = "pytest_xdist-3.6.1-py3-none-any.whl", hash = "sha256:9ed4adfb68a016610848639bb7e02c9352d5d9f03d04809919e2dafc3be4cca7"},
+ {file = "pytest_xdist-3.6.1.tar.gz", hash = "sha256:ead156a4db231eec769737f57668ef58a2084a34b2e55c4a8fa20d861107300d"},
+]
+
+[package.dependencies]
+execnet = ">=2.1"
+pytest = ">=7.0.0"
+
+[package.extras]
+psutil = ["psutil (>=3.0)"]
+setproctitle = ["setproctitle"]
+testing = ["filelock"]
+
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
description = "Extensions to the standard Python datetime module"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["dev"]
files = [
{file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"},
{file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"},
@@ -1527,6 +1661,7 @@ version = "1.1.0"
description = "Read key-value pairs from a .env file and set them as environment variables"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "python_dotenv-1.1.0-py3-none-any.whl", hash = "sha256:d7c01d9e2293916c18baf562d95698754b0dbbb5e74d457c45d4f6561fb9d55d"},
{file = "python_dotenv-1.1.0.tar.gz", hash = "sha256:41f90bc6f5f177fb41f53e87666db362025010eb28f60a01c9143bfa33a2b2d5"},
@@ -1541,6 +1676,7 @@ version = "2025.2"
description = "World timezone definitions, modern and historical"
optional = false
python-versions = "*"
+groups = ["dev"]
files = [
{file = "pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00"},
{file = "pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3"},
@@ -1552,6 +1688,7 @@ version = "6.0.2"
description = "YAML parser and emitter for Python"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"},
{file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"},
@@ -1614,6 +1751,7 @@ version = "0.36.2"
description = "JSON Referencing + Python"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0"},
{file = "referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa"},
@@ -1630,6 +1768,7 @@ version = "2024.11.6"
description = "Alternative regular expression module, to replace re."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"},
{file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"},
@@ -1733,6 +1872,7 @@ version = "1.0.6"
description = "Python client for Replicate"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "replicate-1.0.6-py3-none-any.whl", hash = "sha256:d544f837dc7e9dc3b3c1df60a145c7d6f362d6719b719793a44a4be28837103d"},
{file = "replicate-1.0.6.tar.gz", hash = "sha256:b8a0f1649ed4146c3d624e22a418b8c6decce9346cffc110c90fde5995c46e60"},
@@ -1750,6 +1890,7 @@ version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
@@ -1771,6 +1912,7 @@ version = "0.24.0"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "rpds_py-0.24.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:006f4342fe729a368c6df36578d7a348c7c716be1da0a1a0f86e3021f8e98724"},
{file = "rpds_py-0.24.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2d53747da70a4e4b17f559569d5f9506420966083a31c5fbd84e764461c4444b"},
@@ -1894,6 +2036,7 @@ version = "0.5.7"
description = "An extremely fast Python linter and code formatter, written in Rust."
optional = false
python-versions = ">=3.7"
+groups = ["dev"]
files = [
{file = "ruff-0.5.7-py3-none-linux_armv6l.whl", hash = "sha256:548992d342fc404ee2e15a242cdbea4f8e39a52f2e7752d0e4cbe88d2d2f416a"},
{file = "ruff-0.5.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:00cc8872331055ee017c4f1071a8a31ca0809ccc0657da1d154a1d2abac5c0be"},
@@ -1921,6 +2064,7 @@ version = "1.17.0"
description = "Python 2 and 3 compatibility utilities"
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+groups = ["dev"]
files = [
{file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"},
{file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"},
@@ -1932,6 +2076,7 @@ version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
@@ -1943,6 +2088,7 @@ version = "0.9.0"
description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "tiktoken-0.9.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:586c16358138b96ea804c034b8acf3f5d3f0258bd2bc3b0227af4af5d622e382"},
{file = "tiktoken-0.9.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d9c59ccc528c6c5dd51820b3474402f69d9a9e1d656226848ad68a8d5b2e5108"},
@@ -1990,6 +2136,7 @@ version = "0.21.1"
description = ""
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "tokenizers-0.21.1-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:e78e413e9e668ad790a29456e677d9d3aa50a9ad311a40905d6861ba7692cf41"},
{file = "tokenizers-0.21.1-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:cd51cd0a91ecc801633829fcd1fda9cf8682ed3477c6243b9a095539de4aecf3"},
@@ -2022,6 +2169,8 @@ version = "2.2.1"
description = "A lil' TOML parser"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
+markers = "python_version < \"3.11\""
files = [
{file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"},
{file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"},
@@ -2063,6 +2212,7 @@ version = "4.67.1"
description = "Fast, Extensible Progress Meter"
optional = false
python-versions = ">=3.7"
+groups = ["main", "dev"]
files = [
{file = "tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2"},
{file = "tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2"},
@@ -2084,6 +2234,7 @@ version = "4.23.0.20241208"
description = "Typing stubs for jsonschema"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "types_jsonschema-4.23.0.20241208-py3-none-any.whl", hash = "sha256:87934bd9231c99d8eff94cacfc06ba668f7973577a9bd9e1f9de957c5737313e"},
{file = "types_jsonschema-4.23.0.20241208.tar.gz", hash = "sha256:e8b15ad01f290ecf6aea53f93fbdf7d4730e4600313e89e8a7f95622f7e87b7c"},
@@ -2098,6 +2249,7 @@ version = "5.29.1.20250403"
description = "Typing stubs for protobuf"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "types_protobuf-5.29.1.20250403-py3-none-any.whl", hash = "sha256:c71de04106a2d54e5b2173d0a422058fae0ef2d058d70cf369fb797bf61ffa59"},
{file = "types_protobuf-5.29.1.20250403.tar.gz", hash = "sha256:7ff44f15022119c9d7558ce16e78b2d485bf7040b4fadced4dd069bb5faf77a2"},
@@ -2109,6 +2261,7 @@ version = "2.9.0.20241206"
description = "Typing stubs for python-dateutil"
optional = false
python-versions = ">=3.8"
+groups = ["dev"]
files = [
{file = "types_python_dateutil-2.9.0.20241206-py3-none-any.whl", hash = "sha256:e248a4bc70a486d3e3ec84d0dc30eec3a5f979d6e7ee4123ae043eedbb987f53"},
{file = "types_python_dateutil-2.9.0.20241206.tar.gz", hash = "sha256:18f493414c26ffba692a72369fea7a154c502646301ebfe3d56a04b3767284cb"},
@@ -2120,6 +2273,7 @@ version = "2.32.0.20250328"
description = "Typing stubs for requests"
optional = false
python-versions = ">=3.9"
+groups = ["dev"]
files = [
{file = "types_requests-2.32.0.20250328-py3-none-any.whl", hash = "sha256:72ff80f84b15eb3aa7a8e2625fffb6a93f2ad5a0c20215fc1dcfa61117bcb2a2"},
{file = "types_requests-2.32.0.20250328.tar.gz", hash = "sha256:c9e67228ea103bd811c96984fac36ed2ae8da87a36a633964a21f199d60baf32"},
@@ -2134,6 +2288,7 @@ version = "4.13.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
+groups = ["main", "dev"]
files = [
{file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"},
{file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"},
@@ -2145,6 +2300,7 @@ version = "0.4.0"
description = "Runtime typing introspection tools"
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "typing_inspection-0.4.0-py3-none-any.whl", hash = "sha256:50e72559fcd2a6367a19f7a7e610e6afcb9fac940c650290eed893d61386832f"},
{file = "typing_inspection-0.4.0.tar.gz", hash = "sha256:9765c87de36671694a67904bf2c96e395be9c6439bb6c87b5142569dcdd65122"},
@@ -2159,6 +2315,7 @@ version = "2025.2"
description = "Provider of IANA time zone data"
optional = false
python-versions = ">=2"
+groups = ["dev"]
files = [
{file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"},
{file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"},
@@ -2170,13 +2327,14 @@ version = "2.4.0"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.9"
+groups = ["main", "dev"]
files = [
{file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"},
{file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"},
]
[package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
@@ -2187,6 +2345,7 @@ version = "1.17.2"
description = "Module for decorators, wrappers and monkey patching."
optional = false
python-versions = ">=3.8"
+groups = ["main"]
files = [
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"},
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"},
@@ -2275,20 +2434,21 @@ version = "3.21.0"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.9"
+groups = ["main"]
files = [
{file = "zipp-3.21.0-py3-none-any.whl", hash = "sha256:ac1bbe05fd2991f160ebce24ffbac5f6d11d83dc90891255885223d42b3cd931"},
{file = "zipp-3.21.0.tar.gz", hash = "sha256:2c9958f6430a2040341a52eb608ed6dd93ef4392e02ffe219417c1b28b5dd1f4"},
]
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
-test = ["big-O", "importlib-resources", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
+test = ["big-O", "importlib-resources ; python_version < \"3.9\"", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
type = ["pytest-mypy"]
[metadata]
-lock-version = "2.0"
+lock-version = "2.1"
python-versions = ">=3.9,<4"
-content-hash = "6b18fb6088ede49c2e52a1103a46481d57959171b5f2f6ee13cc3089a3804f5d"
+content-hash = "d9587a36fb01331acd9d45395ab819f88f3402904bf07c324b8cc2b16f2f5d8a"
diff --git a/pyproject.toml b/pyproject.toml
index 9dddf812..66a4c368 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,13 +1,20 @@
+# This section is used by PyPI and follows PEP 621 for package metadata
[project]
name = "humanloop"
+description = "The Humanloop Python Library"
+authors = []
+# This section is used by Poetry for development and building
+# The metadata here is used during development but not published to PyPI
[tool.poetry]
name = "humanloop"
-version = "0.8.36b1"
-description = ""
+version = "0.8.39"
+description = "Humanloop Python SDK"
readme = "README.md"
authors = []
-keywords = []
+packages = [
+ { include = "humanloop", from = "src" },
+]
classifiers = [
"Intended Audience :: Developers",
@@ -26,9 +33,6 @@ classifiers = [
"Topic :: Software Development :: Libraries :: Python Modules",
"Typing :: Typed"
]
-packages = [
- { include = "humanloop", from = "src"}
-]
[project.urls]
Repository = 'https://github.com/humanloop/humanloop-python'
@@ -53,8 +57,10 @@ protobuf = ">=5.29.3"
pydantic = ">= 1.9.2"
pydantic-core = "^2.18.2"
typing_extensions = ">= 4.0.0"
+click = "^8.0.0"
+python-dotenv = "^1.1.0"
-[tool.poetry.dev-dependencies]
+[tool.poetry.group.dev.dependencies]
mypy = "1.0.1"
pytest = "^7.4.0"
pytest-asyncio = "^0.23.5"
@@ -70,11 +76,11 @@ pandas = "^2.2.0"
parse-type = ">=0.6.4"
pyarrow = "^19.0.0"
pytest-retry = "^1.6.3"
-python-dotenv = "^1.0.1"
replicate = "^1.0.3"
ruff = "^0.5.6"
types-jsonschema = "^4.23.0.20240813"
types-protobuf = "^5.29.1.20250208"
+pytest-xdist = "^3.6.1"
[tool.pytest.ini_options]
testpaths = [ "tests" ]
@@ -86,7 +92,10 @@ plugins = ["pydantic.mypy"]
[tool.ruff]
line-length = 120
+[tool.poetry.scripts]
+humanloop = "humanloop.cli.__main__:cli"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
+
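The new `[tool.poetry.scripts]` table registers a `humanloop` console command pointing at `humanloop.cli.__main__:cli`, backed by the newly added runtime dependency on `click`. The CLI module itself is not part of this diff, so the following is only a rough sketch of what a matching entry point could look like; the `sync` subcommand and its option are assumptions, not taken from the source.

```python
# Hypothetical sketch of src/humanloop/cli/__main__.py. Only the entry
# point string "humanloop.cli.__main__:cli" and the click dependency are
# given by this diff; everything below is illustrative.
import click


@click.group()
def cli() -> None:
    """Humanloop command-line interface."""


@cli.command()  # the "sync" subcommand is an assumed example
@click.option("--api-key", envvar="HUMANLOOP_API_KEY", required=True)
def sync(api_key: str) -> None:
    """Pull workspace files into the local sync directory."""
    click.echo(f"Syncing with key ending ...{api_key[-4:]}")


if __name__ == "__main__":
    cli()
```

Once installed, the entry point exposes this group as a `humanloop` executable on the PATH.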
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 00000000..8ab80e5d
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+addopts = -n auto
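The new `pytest.ini` works together with the `pytest-xdist` dev dependency added above: `-n auto` becomes a default option, so plain `pytest` runs distribute tests across all available CPU cores. The same invocation can be reproduced programmatically; a minimal sketch:

```python
# Equivalent to running `pytest` with the new pytest.ini defaults in
# place: pytest-xdist spawns one worker per available CPU.
import pytest

exit_code = pytest.main(["-n", "auto", "tests"])
raise SystemExit(exit_code)
```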
diff --git a/reference.md b/reference.md
index 26f361d0..beb8e609 100644
--- a/reference.md
+++ b/reference.md
@@ -202,7 +202,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptLogRequestPromptParams]`
+
+The Prompt configuration to use. Two formats are supported:
+- An object representing the details of the Prompt configuration
+- A string representing the raw contents of a .prompt file
+A new Prompt version will be created if the provided details do not match any existing version.
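Since the `prompt` parameter now also accepts the raw contents of a `.prompt` file, a log call can pass the serialized file directly instead of a structured object. A minimal sketch, assuming a local `.prompt` file whose name and path are illustrative:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# qa_bot.prompt is a hypothetical file; its raw contents stand in for
# the structured Prompt configuration object.
with open("qa_bot.prompt") as f:
    raw_prompt = f.read()

client.prompts.log(
    path="Support/QA Bot",  # illustrative Prompt path
    prompt=raw_prompt,
)
```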
@@ -752,7 +757,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallStreamRequestPromptParams]`
+
+The Prompt configuration to use. Two formats are supported:
+- An object representing the details of the Prompt configuration
+- A string representing the raw contents of a .prompt file
+A new Prompt version will be created if the provided details do not match any existing version.
@@ -1026,7 +1036,12 @@ Controls how the model uses tools. The following options are supported:
-
-**prompt:** `typing.Optional[PromptKernelRequestParams]` — Details of your Prompt. A new Prompt version will be created if the provided details are new.
+**prompt:** `typing.Optional[PromptsCallRequestPromptParams]`
+
+The Prompt configuration to use. Two formats are supported:
+- An object representing the details of the Prompt configuration
+- A string representing the raw contents of a .prompt file
+A new Prompt version will be created if the provided details do not match any existing version.
@@ -1269,7 +1284,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Prompts by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Prompts by
@@ -3440,7 +3455,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Tools by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Tools by
@@ -4742,7 +4757,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Datasets by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Datasets by
@@ -6321,7 +6336,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Evaluators by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Evaluators by
@@ -8080,7 +8095,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Flows by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Flows by
@@ -8858,52 +8873,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.agents.log(
- path="Banking/Teller Agent",
- agent={
- "provider": "anthropic",
- "endpoint": "chat",
- "model": "claude-3-7-sonnet-latest",
- "reasoning_effort": 1024,
- "template": [
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- "max_iterations": 3,
- "tools": [
- {
- "type": "file",
- "link": {
- "file_id": "pr_1234567890",
- "version_id": "prv_1234567890",
- },
- "on_agent_call": "continue",
- },
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- },
- ],
- },
-)
+client.agents.log()
```
@@ -9037,7 +9007,12 @@ Controls how the model uses tools. The following options are supported:
-
-**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new.
+**agent:** `typing.Optional[AgentLogRequestAgentParams]`
+
+The Agent configuration to use. Two formats are supported:
+- An object representing the details of the Agent configuration
+- A string representing the raw contents of a .agent file
+A new Agent version will be created if the provided details do not match any existing version.
@@ -9244,20 +9219,8 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.update_log(
- id="ag_1234567890",
- log_id="log_1234567890",
- messages=[
- {"role": "user", "content": "I need to withdraw $1000"},
- {
- "role": "assistant",
- "content": "Of course! Would you like to use your savings or checking account?",
- },
- ],
- output_message={
- "role": "assistant",
- "content": "I'm sorry, I can't help with that.",
- },
- log_status="complete",
+ id="id",
+ log_id="log_id",
)
```
@@ -9468,7 +9431,12 @@ Controls how the model uses tools. The following options are supported:
-
-**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new.
+**agent:** `typing.Optional[AgentsCallStreamRequestAgentParams]`
+
+The Agent configuration to use. Two formats are supported:
+- An object representing the details of the Agent configuration
+- A string representing the raw contents of a .agent file
+A new Agent version will be created if the provided details do not match any existing version.
@@ -9654,15 +9622,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-client.agents.call(
- path="Banking/Teller Agent",
- messages=[
- {
- "role": "user",
- "content": "I'd like to deposit $1000 to my savings account from my checking account.",
- }
- ],
-)
+client.agents.call()
```
@@ -9732,7 +9692,12 @@ Controls how the model uses tools. The following options are supported:
-
-**agent:** `typing.Optional[AgentKernelRequestParams]` — Details of your Agent. A new Agent version will be created if the provided details are new.
+**agent:** `typing.Optional[AgentsCallRequestAgentParams]`
+
+The Agent configuration to use. Two formats are supported:
+- An object representing the details of the Agent configuration
+- A string representing the raw contents of a .agent file
+A new Agent version will be created if the provided details do not match any existing version.
@@ -10018,14 +9983,8 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.continue_call(
- log_id="log_1234567890",
- messages=[
- {
- "role": "tool",
- "content": '{"type": "checking", "balance": 5200}',
- "tool_call_id": "tc_1234567890",
- }
- ],
+ log_id="log_id",
+ messages=[{"role": "user"}],
)
```
@@ -10118,14 +10077,7 @@ from humanloop import Humanloop
client = Humanloop(
api_key="YOUR_API_KEY",
)
-response = client.agents.list(
- size=1,
-)
-for item in response:
- yield item
-# alternatively, you can paginate page-by-page
-for page in response.iter_pages():
- yield page
+client.agents.list()
```
@@ -10173,7 +10125,7 @@ for page in response.iter_pages():
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort Agents by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort Agents by
@@ -10241,42 +10193,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.upsert(
- path="Banking/Teller Agent",
- provider="anthropic",
- endpoint="chat",
- model="claude-3-7-sonnet-latest",
- reasoning_effort=1024,
- template=[
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- max_iterations=3,
- tools=[
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- }
- ],
- version_name="teller-agent-v1",
- version_description="Initial version",
+ model="model",
)
```
@@ -10545,8 +10462,8 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.delete_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
+ id="id",
+ version_id="version_id",
)
```
@@ -10624,10 +10541,8 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.patch_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
- name="teller-agent-v2",
- description="Updated version",
+ id="id",
+ version_id="version_id",
)
```
@@ -10724,7 +10639,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.get(
- id="ag_1234567890",
+ id="id",
)
```
@@ -10810,7 +10725,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.delete(
- id="ag_1234567890",
+ id="id",
)
```
@@ -10880,8 +10795,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.move(
- id="ag_1234567890",
- path="new directory/new name",
+ id="id",
)
```
@@ -10975,7 +10889,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.list_versions(
- id="ag_1234567890",
+ id="id",
)
```
@@ -11226,7 +11140,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.list_environments(
- id="ag_1234567890",
+ id="id",
)
```
@@ -11299,12 +11213,7 @@ client = Humanloop(
api_key="YOUR_API_KEY",
)
client.agents.update_monitoring(
- id="ag_1234567890",
- activate=[
- {"evaluator_version_id": "ev_1234567890"},
- {"evaluator_id": "ev_2345678901", "environment_id": "env_1234567890"},
- ],
- deactivate=[{"evaluator_version_id": "ev_0987654321"}],
+ id="id",
)
```
@@ -11980,6 +11889,14 @@ client.files.list_files()
-
+**path:** `typing.Optional[str]` — Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
+
+
+
+
+-
+
**template:** `typing.Optional[bool]` — Filter to include only template files.
@@ -12004,7 +11921,7 @@ client.files.list_files()
-
-**sort_by:** `typing.Optional[ProjectSortBy]` — Field to sort files by
+**sort_by:** `typing.Optional[FileSortBy]` — Field to sort files by
@@ -12020,6 +11937,14 @@ client.files.list_files()
-
+**include_raw_file_content:** `typing.Optional[bool]` — Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
@@ -12098,6 +12023,14 @@ client.files.retrieve_by_path(
-
+**include_raw_file_content:** `typing.Optional[bool]` — Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
+
+
+
+
+-
+
**request_options:** `typing.Optional[RequestOptions]` — Request-specific configuration.
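The new `include_raw_file_content` flag is available on both `list_files` and `retrieve_by_path`. A short sketch of fetching a file together with its raw contents; the `path` value is illustrative, and the flag is only honoured for Agents and Prompts as documented above:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

file = client.files.retrieve_by_path(
    path="Support/QA Bot",          # illustrative path
    include_raw_file_content=True,  # only returned for Agents and Prompts
)
```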
diff --git a/src/humanloop/__init__.py b/src/humanloop/__init__.py
index 8485d75c..8e7fa495 100644
--- a/src/humanloop/__init__.py
+++ b/src/humanloop/__init__.py
@@ -93,6 +93,7 @@
FileId,
FilePath,
FileRequest,
+ FileSortBy,
FileType,
FilesToolType,
FlowKernelRequest,
@@ -150,7 +151,6 @@
PopulateTemplateResponseReasoningEffort,
PopulateTemplateResponseStop,
PopulateTemplateResponseTemplate,
- ProjectSortBy,
PromptCallLogResponse,
PromptCallResponse,
PromptCallResponseToolChoice,
@@ -204,6 +204,8 @@
from .errors import UnprocessableEntityError
from . import agents, datasets, directories, evaluations, evaluators, files, flows, logs, prompts, tools
from .agents import (
+ AgentLogRequestAgent,
+ AgentLogRequestAgentParams,
AgentLogRequestToolChoice,
AgentLogRequestToolChoiceParams,
AgentRequestReasoningEffort,
@@ -214,8 +216,12 @@
AgentRequestTemplateParams,
AgentRequestToolsItem,
AgentRequestToolsItemParams,
+ AgentsCallRequestAgent,
+ AgentsCallRequestAgentParams,
AgentsCallRequestToolChoice,
AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgent,
+ AgentsCallStreamRequestAgentParams,
AgentsCallStreamRequestToolChoice,
AgentsCallStreamRequestToolChoiceParams,
)
@@ -242,6 +248,8 @@
)
from .files import RetrieveByPathFilesRetrieveByPathPostResponse, RetrieveByPathFilesRetrieveByPathPostResponseParams
from .prompts import (
+ PromptLogRequestPrompt,
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoice,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoice,
@@ -252,8 +260,12 @@
PromptRequestStopParams,
PromptRequestTemplate,
PromptRequestTemplateParams,
+ PromptsCallRequestPrompt,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoice,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPrompt,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoice,
PromptsCallStreamRequestToolChoiceParams,
)
@@ -461,6 +473,8 @@
"AgentLinkedFileResponseFile",
"AgentLinkedFileResponseFileParams",
"AgentLinkedFileResponseParams",
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
"AgentLogRequestToolChoice",
"AgentLogRequestToolChoiceParams",
"AgentLogResponse",
@@ -487,8 +501,12 @@
"AgentResponseTemplateParams",
"AgentResponseToolsItem",
"AgentResponseToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
"AgentsCallRequestToolChoice",
"AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
"AgentsCallStreamRequestToolChoice",
"AgentsCallStreamRequestToolChoiceParams",
"AnthropicRedactedThinkingContent",
@@ -622,6 +640,7 @@
"FilePathParams",
"FileRequest",
"FileRequestParams",
+ "FileSortBy",
"FileType",
"FilesToolType",
"FlowKernelRequest",
@@ -725,7 +744,6 @@
"PopulateTemplateResponseStopParams",
"PopulateTemplateResponseTemplate",
"PopulateTemplateResponseTemplateParams",
- "ProjectSortBy",
"PromptCallLogResponse",
"PromptCallLogResponseParams",
"PromptCallResponse",
@@ -742,6 +760,8 @@
"PromptKernelRequestStopParams",
"PromptKernelRequestTemplate",
"PromptKernelRequestTemplateParams",
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogResponse",
@@ -764,8 +784,12 @@
"PromptResponseStopParams",
"PromptResponseTemplate",
"PromptResponseTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
"ProviderApiKeys",
diff --git a/src/humanloop/agents/__init__.py b/src/humanloop/agents/__init__.py
index 04260714..ab2a2f9e 100644
--- a/src/humanloop/agents/__init__.py
+++ b/src/humanloop/agents/__init__.py
@@ -1,25 +1,33 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
+ AgentLogRequestAgent,
AgentLogRequestToolChoice,
AgentRequestReasoningEffort,
AgentRequestStop,
AgentRequestTemplate,
AgentRequestToolsItem,
+ AgentsCallRequestAgent,
AgentsCallRequestToolChoice,
+ AgentsCallStreamRequestAgent,
AgentsCallStreamRequestToolChoice,
)
from .requests import (
+ AgentLogRequestAgentParams,
AgentLogRequestToolChoiceParams,
AgentRequestReasoningEffortParams,
AgentRequestStopParams,
AgentRequestTemplateParams,
AgentRequestToolsItemParams,
+ AgentsCallRequestAgentParams,
AgentsCallRequestToolChoiceParams,
+ AgentsCallStreamRequestAgentParams,
AgentsCallStreamRequestToolChoiceParams,
)
__all__ = [
+ "AgentLogRequestAgent",
+ "AgentLogRequestAgentParams",
"AgentLogRequestToolChoice",
"AgentLogRequestToolChoiceParams",
"AgentRequestReasoningEffort",
@@ -30,8 +38,12 @@
"AgentRequestTemplateParams",
"AgentRequestToolsItem",
"AgentRequestToolsItemParams",
+ "AgentsCallRequestAgent",
+ "AgentsCallRequestAgentParams",
"AgentsCallRequestToolChoice",
"AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgent",
+ "AgentsCallStreamRequestAgentParams",
"AgentsCallStreamRequestToolChoice",
"AgentsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/agents/client.py b/src/humanloop/agents/client.py
index 64f3de62..3cb31092 100644
--- a/src/humanloop/agents/client.py
+++ b/src/humanloop/agents/client.py
@@ -5,29 +5,24 @@
from .raw_client import RawAgentsClient
from ..requests.chat_message import ChatMessageParams
from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
-from ..requests.agent_kernel_request import AgentKernelRequestParams
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
from ..types.create_agent_log_response import CreateAgentLogResponse
from ..types.agent_log_response import AgentLogResponse
from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.agent_call_stream_response import AgentCallStreamResponse
from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
from ..types.agent_call_response import AgentCallResponse
from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse
from ..types.agent_continue_call_response import AgentContinueCallResponse
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
-from ..core.pagination import SyncPager
-from ..types.agent_response import AgentResponse
from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
-from ..core.unchecked_base_model import construct_type
-from ..errors.unprocessable_entity_error import UnprocessableEntityError
-from ..types.http_validation_error import HttpValidationError
-from json.decoder import JSONDecodeError
-from ..core.api_error import ApiError
from ..types.model_endpoints import ModelEndpoints
from .requests.agent_request_template import AgentRequestTemplateParams
from ..types.template_language import TemplateLanguage
@@ -36,6 +31,7 @@
from ..requests.response_format import ResponseFormatParams
from .requests.agent_request_reasoning_effort import AgentRequestReasoningEffortParams
from .requests.agent_request_tools_item import AgentRequestToolsItemParams
+from ..types.agent_response import AgentResponse
from ..types.list_agents import ListAgents
from ..types.file_environment_response import FileEnvironmentResponse
from ..requests.evaluator_activation_deactivation_request_activate_item import (
@@ -47,7 +43,6 @@
from ..types.agent_kernel_request import AgentKernelRequest
from ..core.client_wrapper import AsyncClientWrapper
from .raw_client import AsyncRawAgentsClient
-from ..core.pagination import AsyncPager
# this is used as the default value for optional parameters
OMIT = typing.cast(typing.Any, ...)
@@ -85,7 +80,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -164,8 +159,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -239,52 +237,7 @@ def log(
client = Humanloop(
api_key="YOUR_API_KEY",
)
- client.agents.log(
- path="Banking/Teller Agent",
- agent={
- "provider": "anthropic",
- "endpoint": "chat",
- "model": "claude-3-7-sonnet-latest",
- "reasoning_effort": 1024,
- "template": [
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- "max_iterations": 3,
- "tools": [
- {
- "type": "file",
- "link": {
- "file_id": "pr_1234567890",
- "version_id": "prv_1234567890",
- },
- "on_agent_call": "continue",
- },
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- },
- ],
- },
- )
+ client.agents.log()
"""
response = self._raw_client.log(
version_id=version_id,
@@ -385,20 +338,8 @@ def update_log(
api_key="YOUR_API_KEY",
)
client.agents.update_log(
- id="ag_1234567890",
- log_id="log_1234567890",
- messages=[
- {"role": "user", "content": "I need to withdraw $1000"},
- {
- "role": "assistant",
- "content": "Of course! Would you like to use your savings or checking account?",
- },
- ],
- output_message={
- "role": "assistant",
- "content": "I'm sorry, I can't help with that.",
- },
- log_status="complete",
+ id="id",
+ log_id="log_id",
)
"""
response = self._raw_client.update_log(
@@ -423,7 +364,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -482,8 +423,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -585,7 +529,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -644,8 +588,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -707,15 +654,7 @@ def call(
client = Humanloop(
api_key="YOUR_API_KEY",
)
- client.agents.call(
- path="Banking/Teller Agent",
- messages=[
- {
- "role": "user",
- "content": "I'd like to deposit $1000 to my savings account from my checking account.",
- }
- ],
- )
+ client.agents.call()
"""
response = self._raw_client.call(
version_id=version_id,
@@ -859,14 +798,8 @@ def continue_call(
api_key="YOUR_API_KEY",
)
client.agents.continue_call(
- log_id="log_1234567890",
- messages=[
- {
- "role": "tool",
- "content": '{"type": "checking", "balance": 5200}',
- "tool_call_id": "tc_1234567890",
- }
- ],
+ log_id="log_id",
+ messages=[{"role": "user"}],
)
"""
response = self._raw_client.continue_call(
@@ -885,10 +818,10 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> SyncPager[AgentResponse]:
+ ) -> PaginatedDataAgentResponse:
"""
Get a list of all Agents.
@@ -906,7 +839,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Agents by
order : typing.Optional[SortOrder]
@@ -917,7 +850,7 @@ def list(
Returns
-------
- SyncPager[AgentResponse]
+ PaginatedDataAgentResponse
Successful Response
Examples
@@ -927,64 +860,18 @@ def list(
client = Humanloop(
api_key="YOUR_API_KEY",
)
- response = client.agents.list(
- size=1,
- )
- for item in response:
- yield item
- # alternatively, you can paginate page-by-page
- for page in response.iter_pages():
- yield page
- """
- page = page if page is not None else 1
- _response = self._raw_client._client_wrapper.httpx_client.request(
- "agents",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ client.agents.list()
+ """
+ response = self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataAgentResponse,
- construct_type(
- type_=PaginatedDataAgentResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return SyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ return response.data
def upsert(
self,
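Since `list` now returns a single `PaginatedDataAgentResponse` page rather than a `SyncPager`, callers that previously iterated the pager must step through pages themselves. A minimal sketch of the new usage; the `records` attribute is grounded in the removed pager code above, while the empty-page termination condition is an assumption:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# list() now returns one page of results; iterate pages manually.
page_number = 1
while True:
    page = client.agents.list(page=page_number, size=50)
    if not page.records:  # assumed: an empty records list marks the last page
        break
    for agent in page.records:
        print(agent.path)
    page_number += 1
```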
@@ -1123,42 +1010,7 @@ def upsert(
api_key="YOUR_API_KEY",
)
client.agents.upsert(
- path="Banking/Teller Agent",
- provider="anthropic",
- endpoint="chat",
- model="claude-3-7-sonnet-latest",
- reasoning_effort=1024,
- template=[
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- max_iterations=3,
- tools=[
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- }
- ],
- version_name="teller-agent-v1",
- version_description="Initial version",
+ model="model",
)
"""
response = self._raw_client.upsert(
@@ -1220,8 +1072,8 @@ def delete_agent_version(
api_key="YOUR_API_KEY",
)
client.agents.delete_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
+ id="id",
+ version_id="version_id",
)
"""
response = self._raw_client.delete_agent_version(id, version_id, request_options=request_options)
@@ -1269,10 +1121,8 @@ def patch_agent_version(
api_key="YOUR_API_KEY",
)
client.agents.patch_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
- name="teller-agent-v2",
- description="Updated version",
+ id="id",
+ version_id="version_id",
)
"""
response = self._raw_client.patch_agent_version(
@@ -1321,7 +1171,7 @@ def get(
api_key="YOUR_API_KEY",
)
client.agents.get(
- id="ag_1234567890",
+ id="id",
)
"""
response = self._raw_client.get(
@@ -1353,7 +1203,7 @@ def delete(self, id: str, *, request_options: typing.Optional[RequestOptions] =
api_key="YOUR_API_KEY",
)
client.agents.delete(
- id="ag_1234567890",
+ id="id",
)
"""
response = self._raw_client.delete(id, request_options=request_options)
@@ -1401,8 +1251,7 @@ def move(
api_key="YOUR_API_KEY",
)
client.agents.move(
- id="ag_1234567890",
- path="new directory/new name",
+ id="id",
)
"""
response = self._raw_client.move(
@@ -1444,7 +1293,7 @@ def list_versions(
api_key="YOUR_API_KEY",
)
client.agents.list_versions(
- id="ag_1234567890",
+ id="id",
)
"""
response = self._raw_client.list_versions(
@@ -1564,7 +1413,7 @@ def list_environments(
api_key="YOUR_API_KEY",
)
client.agents.list_environments(
- id="ag_1234567890",
+ id="id",
)
"""
response = self._raw_client.list_environments(id, request_options=request_options)
@@ -1610,12 +1459,7 @@ def update_monitoring(
api_key="YOUR_API_KEY",
)
client.agents.update_monitoring(
- id="ag_1234567890",
- activate=[
- {"evaluator_version_id": "ev_1234567890"},
- {"evaluator_id": "ev_2345678901", "environment_id": "env_1234567890"},
- ],
- deactivate=[{"evaluator_version_id": "ev_0987654321"}],
+ id="id",
)
"""
response = self._raw_client.update_monitoring(
@@ -1630,7 +1474,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize an Agent to the .agent file format.
@@ -1656,7 +1500,8 @@ def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
@@ -1740,7 +1585,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1819,8 +1664,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -1899,52 +1747,7 @@ async def log(
async def main() -> None:
- await client.agents.log(
- path="Banking/Teller Agent",
- agent={
- "provider": "anthropic",
- "endpoint": "chat",
- "model": "claude-3-7-sonnet-latest",
- "reasoning_effort": 1024,
- "template": [
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- "max_iterations": 3,
- "tools": [
- {
- "type": "file",
- "link": {
- "file_id": "pr_1234567890",
- "version_id": "prv_1234567890",
- },
- "on_agent_call": "continue",
- },
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- },
- ],
- },
- )
+ await client.agents.log()
asyncio.run(main())
@@ -2053,20 +1856,8 @@ async def update_log(
async def main() -> None:
await client.agents.update_log(
- id="ag_1234567890",
- log_id="log_1234567890",
- messages=[
- {"role": "user", "content": "I need to withdraw $1000"},
- {
- "role": "assistant",
- "content": "Of course! Would you like to use your savings or checking account?",
- },
- ],
- output_message={
- "role": "assistant",
- "content": "I'm sorry, I can't help with that.",
- },
- log_status="complete",
+ id="id",
+ log_id="log_id",
)
@@ -2094,7 +1885,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2153,8 +1944,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2265,7 +2059,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2324,8 +2118,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2392,15 +2189,7 @@ async def call(
async def main() -> None:
- await client.agents.call(
- path="Banking/Teller Agent",
- messages=[
- {
- "role": "user",
- "content": "I'd like to deposit $1000 to my savings account from my checking account.",
- }
- ],
- )
+ await client.agents.call()
asyncio.run(main())
@@ -2561,14 +2350,8 @@ async def continue_call(
async def main() -> None:
await client.agents.continue_call(
- log_id="log_1234567890",
- messages=[
- {
- "role": "tool",
- "content": '{"type": "checking", "balance": 5200}',
- "tool_call_id": "tc_1234567890",
- }
- ],
+ log_id="log_id",
+ messages=[{"role": "user"}],
)
@@ -2590,10 +2373,10 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncPager[AgentResponse]:
+ ) -> PaginatedDataAgentResponse:
"""
Get a list of all Agents.
@@ -2611,7 +2394,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Agents by
order : typing.Optional[SortOrder]
@@ -2622,7 +2405,7 @@ async def list(
Returns
-------
- AsyncPager[AgentResponse]
+ PaginatedDataAgentResponse
Successful Response
Examples
@@ -2637,67 +2420,21 @@ async def list(
async def main() -> None:
- response = await client.agents.list(
- size=1,
- )
- async for item in response:
- yield item
- # alternatively, you can paginate page-by-page
- async for page in response.iter_pages():
- yield page
+ await client.agents.list()
asyncio.run(main())
"""
- page = page if page is not None else 1
- _response = await self._raw_client._client_wrapper.httpx_client.request(
- "agents",
- method="GET",
- params={
- "page": page,
- "size": size,
- "name": name,
- "user_filter": user_filter,
- "sort_by": sort_by,
- "order": order,
- },
+ response = await self._raw_client.list(
+ page=page,
+ size=size,
+ name=name,
+ user_filter=user_filter,
+ sort_by=sort_by,
+ order=order,
request_options=request_options,
)
- try:
- if 200 <= _response.status_code < 300:
- _parsed_response = typing.cast(
- PaginatedDataAgentResponse,
- construct_type(
- type_=PaginatedDataAgentResponse, # type: ignore
- object_=_response.json(),
- ),
- )
- _has_next = True
- _get_next = lambda: self.list(
- page=page + 1,
- size=size,
- name=name,
- user_filter=user_filter,
- sort_by=sort_by,
- order=order,
- request_options=request_options,
- )
- _items = _parsed_response.records
- return AsyncPager(has_next=_has_next, items=_items, get_next=_get_next)
- if _response.status_code == 422:
- raise UnprocessableEntityError(
- typing.cast(
- HttpValidationError,
- construct_type(
- type_=HttpValidationError, # type: ignore
- object_=_response.json(),
- ),
- )
- )
- _response_json = _response.json()
- except JSONDecodeError:
- raise ApiError(status_code=_response.status_code, body=_response.text)
- raise ApiError(status_code=_response.status_code, body=_response_json)
+ return response.data
async def upsert(
self,
@@ -2841,42 +2578,7 @@ async def upsert(
async def main() -> None:
await client.agents.upsert(
- path="Banking/Teller Agent",
- provider="anthropic",
- endpoint="chat",
- model="claude-3-7-sonnet-latest",
- reasoning_effort=1024,
- template=[
- {
- "role": "system",
- "content": "You are a helpful digital assistant, helping users navigate our digital banking platform.",
- }
- ],
- max_iterations=3,
- tools=[
- {
- "type": "inline",
- "json_schema": {
- "name": "stop",
- "description": "Call this tool when you have finished your task.",
- "parameters": {
- "type": "object",
- "properties": {
- "output": {
- "type": "string",
- "description": "The final output to return to the user.",
- }
- },
- "additionalProperties": False,
- "required": ["output"],
- },
- "strict": True,
- },
- "on_agent_call": "stop",
- }
- ],
- version_name="teller-agent-v1",
- version_description="Initial version",
+ model="model",
)
@@ -2946,8 +2648,8 @@ async def delete_agent_version(
async def main() -> None:
await client.agents.delete_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
+ id="id",
+ version_id="version_id",
)
@@ -3003,10 +2705,8 @@ async def patch_agent_version(
async def main() -> None:
await client.agents.patch_agent_version(
- id="ag_1234567890",
- version_id="agv_1234567890",
- name="teller-agent-v2",
- description="Updated version",
+ id="id",
+ version_id="version_id",
)
@@ -3063,7 +2763,7 @@ async def get(
async def main() -> None:
await client.agents.get(
- id="ag_1234567890",
+ id="id",
)
@@ -3103,7 +2803,7 @@ async def delete(self, id: str, *, request_options: typing.Optional[RequestOptio
async def main() -> None:
await client.agents.delete(
- id="ag_1234567890",
+ id="id",
)
@@ -3159,8 +2859,7 @@ async def move(
async def main() -> None:
await client.agents.move(
- id="ag_1234567890",
- path="new directory/new name",
+ id="id",
)
@@ -3210,7 +2909,7 @@ async def list_versions(
async def main() -> None:
await client.agents.list_versions(
- id="ag_1234567890",
+ id="id",
)
@@ -3354,7 +3053,7 @@ async def list_environments(
async def main() -> None:
await client.agents.list_environments(
- id="ag_1234567890",
+ id="id",
)
@@ -3408,15 +3107,7 @@ async def update_monitoring(
async def main() -> None:
await client.agents.update_monitoring(
- id="ag_1234567890",
- activate=[
- {"evaluator_version_id": "ev_1234567890"},
- {
- "evaluator_id": "ev_2345678901",
- "environment_id": "env_1234567890",
- },
- ],
- deactivate=[{"evaluator_version_id": "ev_0987654321"}],
+ id="id",
)
@@ -3434,7 +3125,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize an Agent to the .agent file format.
@@ -3460,7 +3151,8 @@ async def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
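`serialize` now returns the raw `.agent` file contents as a `str` instead of `None`, which pairs naturally with the new string form of the `agent` parameter. A hedged round-trip sketch (the Agent id is a placeholder):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Serialize an existing Agent to the .agent file format (now a str, not None).
agent_file = client.agents.serialize(id="ag_1234567890")

# The raw .agent contents are one of the two accepted `agent` formats,
# so they can be passed back verbatim when calling an Agent.
response = client.agents.call(
    agent=agent_file,
    messages=[{"role": "user", "content": "Hello"}],
)
```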
diff --git a/src/humanloop/agents/raw_client.py b/src/humanloop/agents/raw_client.py
index b13491a6..8ce4fa79 100644
--- a/src/humanloop/agents/raw_client.py
+++ b/src/humanloop/agents/raw_client.py
@@ -4,7 +4,7 @@
from ..core.client_wrapper import SyncClientWrapper
from ..requests.chat_message import ChatMessageParams
from .requests.agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
-from ..requests.agent_kernel_request import AgentKernelRequestParams
+from .requests.agent_log_request_agent import AgentLogRequestAgentParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -19,14 +19,19 @@
from ..types.agent_log_response import AgentLogResponse
from ..core.jsonable_encoder import jsonable_encoder
from .requests.agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
+from .requests.agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.agent_call_stream_response import AgentCallStreamResponse
import httpx_sse
import contextlib
from .requests.agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .requests.agents_call_request_agent import AgentsCallRequestAgentParams
from ..types.agent_call_response import AgentCallResponse
from ..types.agent_continue_call_stream_response import AgentContinueCallStreamResponse
from ..types.agent_continue_call_response import AgentContinueCallResponse
+from ..types.file_sort_by import FileSortBy
+from ..types.sort_order import SortOrder
+from ..types.paginated_data_agent_response import PaginatedDataAgentResponse
from ..types.model_endpoints import ModelEndpoints
from .requests.agent_request_template import AgentRequestTemplateParams
from ..types.template_language import TemplateLanguage
@@ -73,7 +78,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -152,8 +157,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -247,7 +255,7 @@ def log(
object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -408,7 +416,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -467,8 +475,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -540,7 +551,7 @@ def call_stream(
object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -611,7 +622,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -670,8 +681,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -743,7 +757,7 @@ def call(
object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -980,6 +994,86 @@ def continue_call(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> HttpResponse[PaginatedDataAgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Agents by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ HttpResponse[PaginatedDataAgentResponse]
+ Successful Response
+ """
+ _response = self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return HttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
def upsert(
self,
*,
@@ -1770,7 +1864,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[None]:
+ ) -> HttpResponse[str]:
"""
Serialize an Agent to the .agent file format.
@@ -1796,7 +1890,8 @@ def serialize(
Returns
-------
- HttpResponse[None]
+ HttpResponse[str]
+ Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"agents/{jsonable_encoder(id)}/serialize",
@@ -1809,7 +1904,7 @@ def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return HttpResponse(response=_response, data=None)
+ return HttpResponse(response=_response, data=_response.text)
if _response.status_code == 422:
raise UnprocessableEntityError(
typing.cast(
@@ -1905,7 +2000,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentLogRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentLogRequestAgentParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1984,8 +2079,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentLogRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -2079,7 +2177,7 @@ async def log(
object_=tool_choice, annotation=AgentLogRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentLogRequestAgentParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -2240,7 +2338,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallStreamRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallStreamRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2299,8 +2397,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallStreamRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2372,7 +2473,7 @@ async def call_stream(
object_=tool_choice, annotation=AgentsCallStreamRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallStreamRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2443,7 +2544,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[AgentsCallRequestToolChoiceParams] = OMIT,
- agent: typing.Optional[AgentKernelRequestParams] = OMIT,
+ agent: typing.Optional[AgentsCallRequestAgentParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2502,8 +2603,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': ...}}` forces the model to use the named function.
- agent : typing.Optional[AgentKernelRequestParams]
- Details of your Agent. A new Agent version will be created if the provided details are new.
+ agent : typing.Optional[AgentsCallRequestAgentParams]
+ The Agent configuration to use. Two formats are supported:
+ - An object representing the details of the Agent configuration
+ - A string representing the raw contents of a .agent file
+ A new Agent version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2575,7 +2679,7 @@ async def call(
object_=tool_choice, annotation=AgentsCallRequestToolChoiceParams, direction="write"
),
"agent": convert_and_respect_annotation_metadata(
- object_=agent, annotation=AgentKernelRequestParams, direction="write"
+ object_=agent, annotation=AgentsCallRequestAgentParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2812,6 +2916,86 @@ async def continue_call(
raise ApiError(status_code=_response.status_code, body=_response.text)
raise ApiError(status_code=_response.status_code, body=_response_json)
+ async def list(
+ self,
+ *,
+ page: typing.Optional[int] = None,
+ size: typing.Optional[int] = None,
+ name: typing.Optional[str] = None,
+ user_filter: typing.Optional[str] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
+ order: typing.Optional[SortOrder] = None,
+ request_options: typing.Optional[RequestOptions] = None,
+ ) -> AsyncHttpResponse[PaginatedDataAgentResponse]:
+ """
+ Get a list of all Agents.
+
+ Parameters
+ ----------
+ page : typing.Optional[int]
+ Page number for pagination.
+
+ size : typing.Optional[int]
+ Page size for pagination. Number of Agents to fetch.
+
+ name : typing.Optional[str]
+ Case-insensitive filter for Agent name.
+
+ user_filter : typing.Optional[str]
+ Case-insensitive filter for users in the Agent. This filter matches against both email address and name of users.
+
+ sort_by : typing.Optional[FileSortBy]
+ Field to sort Agents by
+
+ order : typing.Optional[SortOrder]
+ Direction to sort by.
+
+ request_options : typing.Optional[RequestOptions]
+ Request-specific configuration.
+
+ Returns
+ -------
+ AsyncHttpResponse[PaginatedDataAgentResponse]
+ Successful Response
+ """
+ _response = await self._client_wrapper.httpx_client.request(
+ "agents",
+ method="GET",
+ params={
+ "page": page,
+ "size": size,
+ "name": name,
+ "user_filter": user_filter,
+ "sort_by": sort_by,
+ "order": order,
+ },
+ request_options=request_options,
+ )
+ try:
+ if 200 <= _response.status_code < 300:
+ _data = typing.cast(
+ PaginatedDataAgentResponse,
+ construct_type(
+ type_=PaginatedDataAgentResponse, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ return AsyncHttpResponse(response=_response, data=_data)
+ if _response.status_code == 422:
+ raise UnprocessableEntityError(
+ typing.cast(
+ HttpValidationError,
+ construct_type(
+ type_=HttpValidationError, # type: ignore
+ object_=_response.json(),
+ ),
+ )
+ )
+ _response_json = _response.json()
+ except JSONDecodeError:
+ raise ApiError(status_code=_response.status_code, body=_response.text)
+ raise ApiError(status_code=_response.status_code, body=_response_json)
+
async def upsert(
self,
*,
@@ -3604,7 +3788,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[None]:
+ ) -> AsyncHttpResponse[str]:
"""
Serialize an Agent to the .agent file format.
@@ -3630,7 +3814,8 @@ async def serialize(
Returns
-------
- AsyncHttpResponse[None]
+ AsyncHttpResponse[str]
+ Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"agents/{jsonable_encoder(id)}/serialize",
@@ -3643,7 +3828,7 @@ async def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return AsyncHttpResponse(response=_response, data=None)
+ return AsyncHttpResponse(response=_response, data=_response.text)
if _response.status_code == 422:
raise UnprocessableEntityError(
typing.cast(
diff --git a/src/humanloop/agents/requests/__init__.py b/src/humanloop/agents/requests/__init__.py
index 78a8f9ec..06ce37ed 100644
--- a/src/humanloop/agents/requests/__init__.py
+++ b/src/humanloop/agents/requests/__init__.py
@@ -1,19 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_log_request_agent import AgentLogRequestAgentParams
from .agent_log_request_tool_choice import AgentLogRequestToolChoiceParams
from .agent_request_reasoning_effort import AgentRequestReasoningEffortParams
from .agent_request_stop import AgentRequestStopParams
from .agent_request_template import AgentRequestTemplateParams
from .agent_request_tools_item import AgentRequestToolsItemParams
+from .agents_call_request_agent import AgentsCallRequestAgentParams
from .agents_call_request_tool_choice import AgentsCallRequestToolChoiceParams
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgentParams
from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoiceParams
__all__ = [
+ "AgentLogRequestAgentParams",
"AgentLogRequestToolChoiceParams",
"AgentRequestReasoningEffortParams",
"AgentRequestStopParams",
"AgentRequestTemplateParams",
"AgentRequestToolsItemParams",
+ "AgentsCallRequestAgentParams",
"AgentsCallRequestToolChoiceParams",
+ "AgentsCallStreamRequestAgentParams",
"AgentsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/agents/requests/agent_log_request_agent.py b/src/humanloop/agents/requests/agent_log_request_agent.py
new file mode 100644
index 00000000..1c6a7987
--- /dev/null
+++ b/src/humanloop/agents/requests/agent_log_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentLogRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agents_call_request_agent.py b/src/humanloop/agents/requests/agents_call_request_agent.py
new file mode 100644
index 00000000..5c92d02b
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
diff --git a/src/humanloop/agents/requests/agents_call_stream_request_agent.py b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..e9018a18
--- /dev/null
+++ b/src/humanloop/agents/requests/agents_call_stream_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.agent_kernel_request import AgentKernelRequestParams
+
+AgentsCallStreamRequestAgentParams = typing.Union[AgentKernelRequestParams, str]
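All three new aliases resolve to `typing.Union[AgentKernelRequestParams, str]`, which is what lets `agent=` accept either a configuration object or raw `.agent` file contents. A sketch of both forms, reusing the banking example from the removed docstrings; the local file path in the second form is hypothetical:

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Form 1: an AgentKernelRequestParams-style object with configuration details.
client.agents.log(
    path="Banking/Teller Agent",
    agent={"provider": "anthropic", "endpoint": "chat", "model": "claude-3-7-sonnet-latest"},
)

# Form 2: the raw contents of a .agent file (the str side of the union).
with open("humanloop/Banking/Teller Agent.agent") as f:
    client.agents.log(path="Banking/Teller Agent", agent=f.read())
```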
diff --git a/src/humanloop/agents/types/__init__.py b/src/humanloop/agents/types/__init__.py
index 73d98669..9c8a955c 100644
--- a/src/humanloop/agents/types/__init__.py
+++ b/src/humanloop/agents/types/__init__.py
@@ -1,19 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .agent_log_request_agent import AgentLogRequestAgent
from .agent_log_request_tool_choice import AgentLogRequestToolChoice
from .agent_request_reasoning_effort import AgentRequestReasoningEffort
from .agent_request_stop import AgentRequestStop
from .agent_request_template import AgentRequestTemplate
from .agent_request_tools_item import AgentRequestToolsItem
+from .agents_call_request_agent import AgentsCallRequestAgent
from .agents_call_request_tool_choice import AgentsCallRequestToolChoice
+from .agents_call_stream_request_agent import AgentsCallStreamRequestAgent
from .agents_call_stream_request_tool_choice import AgentsCallStreamRequestToolChoice
__all__ = [
+ "AgentLogRequestAgent",
"AgentLogRequestToolChoice",
"AgentRequestReasoningEffort",
"AgentRequestStop",
"AgentRequestTemplate",
"AgentRequestToolsItem",
+ "AgentsCallRequestAgent",
"AgentsCallRequestToolChoice",
+ "AgentsCallStreamRequestAgent",
"AgentsCallStreamRequestToolChoice",
]
diff --git a/src/humanloop/agents/types/agent_log_request_agent.py b/src/humanloop/agents/types/agent_log_request_agent.py
new file mode 100644
index 00000000..011a2b9d
--- /dev/null
+++ b/src/humanloop/agents/types/agent_log_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentLogRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_request_agent.py b/src/humanloop/agents/types/agents_call_request_agent.py
new file mode 100644
index 00000000..5f663ad3
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/src/humanloop/agents/types/agents_call_stream_request_agent.py b/src/humanloop/agents/types/agents_call_stream_request_agent.py
new file mode 100644
index 00000000..4b2654e9
--- /dev/null
+++ b/src/humanloop/agents/types/agents_call_stream_request_agent.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.agent_kernel_request import AgentKernelRequest
+
+AgentsCallStreamRequestAgent = typing.Union[AgentKernelRequest, str]
diff --git a/tests/integration/__init__.py b/src/humanloop/cli/__init__.py
similarity index 100%
rename from tests/integration/__init__.py
rename to src/humanloop/cli/__init__.py
diff --git a/src/humanloop/cli/__main__.py b/src/humanloop/cli/__main__.py
new file mode 100644
index 00000000..ad582bbc
--- /dev/null
+++ b/src/humanloop/cli/__main__.py
@@ -0,0 +1,248 @@
+import click
+import logging
+from typing import Optional, Callable
+from functools import wraps
+from dotenv import load_dotenv
+import os
+import sys
+from humanloop import Humanloop
+from humanloop.sync.sync_client import SyncClient
+import time
+
+# Set up logging
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.INFO) # Default level for CLI output
+console_handler = logging.StreamHandler()
+formatter = logging.Formatter("%(message)s") # Message-only output keeps CLI logs clean
+console_handler.setFormatter(formatter)
+if not logger.hasHandlers():
+ logger.addHandler(console_handler)
+
+# Color constants
+SUCCESS_COLOR = "green"
+ERROR_COLOR = "red"
+INFO_COLOR = "blue"
+WARNING_COLOR = "yellow"
+
+
+def load_api_key(env_file: Optional[str] = None) -> str:
+ """Load API key from .env file or environment variable.
+
+ Args:
+ env_file: Optional path to .env file
+
+ Returns:
+ str: The loaded API key
+
+ Raises:
+ click.ClickException: If no API key is found
+ """
+ # Try specific .env file if provided, otherwise default to .env in current directory
+ if env_file:
+ if not load_dotenv(env_file): # load_dotenv returns False if file not found/invalid
+ raise click.ClickException(
+ click.style(
+ f"Failed to load environment file: {env_file} (file not found or invalid format)",
+ fg=ERROR_COLOR,
+ )
+ )
+ else:
+ load_dotenv() # Attempt to load from default .env in current directory
+
+ # Get API key from environment
+ api_key = os.getenv("HUMANLOOP_API_KEY")
+ if not api_key:
+ raise click.ClickException(
+ click.style(
+ "No API key found. Set HUMANLOOP_API_KEY in .env file or environment, or use --api-key", fg=ERROR_COLOR
+ )
+ )
+
+ return api_key
+
+
+def get_client(
+ api_key: Optional[str] = None, env_file: Optional[str] = None, base_url: Optional[str] = None
+) -> Humanloop:
+ """Instantiate a Humanloop client for the CLI.
+
+ Args:
+ api_key: Optional API key provided directly
+ env_file: Optional path to .env file
+ base_url: Optional base URL for the API
+
+ Returns:
+ Humanloop: Configured client instance
+
+ Raises:
+ click.ClickException: If no API key is found
+ """
+ if not api_key:
+ api_key = load_api_key(env_file)
+ return Humanloop(api_key=api_key, base_url=base_url)
+
+
+def common_options(f: Callable) -> Callable:
+ """Decorator for common CLI options."""
+
+ @click.option(
+ "--api-key",
+ help="Humanloop API key. If not provided, uses HUMANLOOP_API_KEY from .env or environment.",
+ default=None,
+ show_default=False,
+ )
+ @click.option(
+ "--env-file",
+ help="Path to .env file. If not provided, looks for .env in current directory.",
+ default=None,
+ type=click.Path(exists=True),
+ show_default=False,
+ )
+ @click.option(
+ "--local-files-directory",
+ "--local-dir",
+ help="Directory (relative to the current working directory) where Humanloop files are stored locally (default: humanloop/).",
+ default="humanloop",
+ type=click.Path(),
+ )
+ @click.option(
+ "--base-url",
+ default=None,
+ hidden=True,
+ )
+ @wraps(f)
+ def wrapper(*args, **kwargs):
+ return f(*args, **kwargs)
+
+ return wrapper
+
+
+def handle_sync_errors(f: Callable) -> Callable:
+ """Decorator for handling sync operation errors.
+
+ If an error occurs in any operation that uses this decorator, it will be logged and the program will exit with a non-zero exit code.
+ """
+
+ @wraps(f)
+ def wrapper(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except Exception as e:
+ click.echo(click.style(f"Error: {e}", fg=ERROR_COLOR))
+ sys.exit(1)
+
+ return wrapper
+
+
+@click.group(
+ help="Humanloop CLI for managing sync operations.",
+ context_settings={
+ "help_option_names": ["-h", "--help"],
+ "max_content_width": 100,
+ },
+)
+def cli(): # Group entry point; subcommands (pull, etc.) do the work
+ """Humanloop CLI for managing sync operations."""
+ pass
+
+
+@cli.command()
+@click.option(
+ "--path",
+ "-p",
+ help="Path in the Humanloop workspace to pull from (file or directory). You can pull an entire directory (e.g. 'my/directory') "
+ "or a specific file (e.g. 'my/directory/my_prompt.prompt'). When pulling a directory, all files within that directory and its subdirectories will be included. "
+ "If not specified, pulls from the root of the remote workspace.",
+ default=None,
+)
+@click.option(
+ "--environment",
+ "-e",
+ help="Environment to pull from (e.g. 'production', 'staging')",
+ default=None,
+)
+@click.option(
+ "--verbose",
+ "-v",
+ is_flag=True,
+ help="Show detailed information about the operation",
+)
+@click.option(
+ "--quiet",
+ "-q",
+ is_flag=True,
+ help="Suppress output of successful files",
+)
+@handle_sync_errors
+@common_options
+def pull(
+ path: Optional[str],
+ environment: Optional[str],
+ api_key: Optional[str],
+ env_file: Optional[str],
+ local_files_directory: str,
+ base_url: Optional[str],
+ verbose: bool,
+ quiet: bool,
+):
+ """Pull Prompt and Agent files from Humanloop to your local filesystem.
+
+ \b
+ This command will:
+ 1. Fetch Prompt and Agent files from your Humanloop workspace
+ 2. Save them to your local filesystem (directory specified by --local-files-directory, default: humanloop/)
+ 3. Maintain the same directory structure as in Humanloop
+ 4. Add appropriate file extensions (.prompt or .agent)
+
+ \b
+ For example, with the default --local-files-directory=humanloop, files will be saved as:
+ ./humanloop/
+ ├── my_project/
+ │ ├── prompts/
+ │ │ ├── my_prompt.prompt
+ │ │ └── nested/
+ │ │ └── another_prompt.prompt
+ │ └── agents/
+ │ └── my_agent.agent
+ └── another_project/
+ └── prompts/
+ └── other_prompt.prompt
+
+ \b
+ If you specify --local-files-directory=data/humanloop, files will be saved in ./data/humanloop/ instead.
+
+ If a file exists both locally and in the Humanloop workspace, the local file will be overwritten
+ with the version from Humanloop. Files that only exist locally will not be affected.
+
+ Currently only supports syncing Prompt and Agent files. Other file types will be skipped."""
+ client = get_client(api_key, env_file, base_url)
+ sync_client = SyncClient(
+ client, base_dir=local_files_directory, log_level=logging.DEBUG if verbose else logging.WARNING
+ )
+
+ click.echo(click.style("Pulling files from Humanloop...", fg=INFO_COLOR))
+ click.echo(click.style(f"Path: {path or '(root)'}", fg=INFO_COLOR))
+ click.echo(click.style(f"Environment: {environment or '(default)'}", fg=INFO_COLOR))
+
+ start_time = time.time()
+ successful_files, failed_files = sync_client.pull(path, environment)
+ duration_ms = int((time.time() - start_time) * 1000)
+
+ # Determine if the operation was successful based on failed_files
+ is_successful = not failed_files
+ duration_color = SUCCESS_COLOR if is_successful else ERROR_COLOR
+ click.echo(click.style(f"Pull completed in {duration_ms}ms", fg=duration_color))
+
+ if successful_files and not quiet:
+ click.echo(click.style(f"\nSuccessfully pulled {len(successful_files)} files:", fg=SUCCESS_COLOR))
+ for file in successful_files:
+ click.echo(click.style(f" ✓ {file}", fg=SUCCESS_COLOR))
+
+ if failed_files:
+ click.echo(click.style(f"\nFailed to pull {len(failed_files)} files:", fg=ERROR_COLOR))
+ for file in failed_files:
+ click.echo(click.style(f" ✗ {file}", fg=ERROR_COLOR))
+
+
+if __name__ == "__main__":
+ cli()
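One way to exercise the new `pull` command without a console-script entry point is Click's built-in test runner; a sketch, assuming `HUMANLOOP_API_KEY` is set in the environment (this mirrors what `python -m humanloop.cli pull ...` does):

```python
from click.testing import CliRunner

from humanloop.cli.__main__ import cli

# Invoke `pull` against the production environment with verbose output,
# writing into the default ./humanloop/ directory.
runner = CliRunner()
result = runner.invoke(cli, ["pull", "--environment", "production", "--verbose"])
print(result.output)
assert result.exit_code == 0
```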
diff --git a/src/humanloop/client.py b/src/humanloop/client.py
index 74cd6c97..fce02a98 100644
--- a/src/humanloop/client.py
+++ b/src/humanloop/client.py
@@ -1,6 +1,7 @@
import os
import typing
-from typing import Any, List, Optional, Sequence
+from typing import Any, List, Optional, Sequence, Tuple
+import logging
import httpx
from opentelemetry.sdk.resources import Resource
@@ -18,7 +19,7 @@
)
from humanloop.base_client import AsyncBaseHumanloop, BaseHumanloop
-from humanloop.overload import overload_call, overload_log
+from humanloop.overload import overload_client
from humanloop.decorators.flow import flow as flow_decorator_factory
from humanloop.decorators.prompt import prompt_decorator_factory
from humanloop.decorators.tool import tool_decorator_factory as tool_decorator_factory
@@ -29,6 +30,9 @@
from humanloop.otel.processor import HumanloopSpanProcessor
from humanloop.prompt_utils import populate_template
from humanloop.prompts.client import PromptsClient
+from humanloop.sync.sync_client import SyncClient, DEFAULT_CACHE_SIZE
+
+logger = logging.getLogger("humanloop.sdk")
class ExtendedEvalsClient(EvaluationsClient):
@@ -87,8 +91,9 @@ class Humanloop(BaseHumanloop):
"""
See docstring of :class:`BaseHumanloop`.
- This class extends the base client with custom evaluation utilities
- and decorators for declaring Files in code.
+ This class extends the base client with custom evaluation utilities,
+ decorators for declaring Files in code, and utilities for syncing
+ files between Humanloop and local filesystem.
"""
def __init__(
@@ -102,6 +107,9 @@ def __init__(
httpx_client: typing.Optional[httpx.Client] = None,
opentelemetry_tracer_provider: Optional[TracerProvider] = None,
opentelemetry_tracer: Optional[Tracer] = None,
+ use_local_files: bool = False,
+ local_files_directory: str = "humanloop",
+ cache_size: int = DEFAULT_CACHE_SIZE,
):
"""
Extends the base client with custom evaluation utilities and
@@ -111,6 +119,27 @@ def __init__(
You can provide a TracerProvider and a Tracer to integrate
with your existing telemetry system. If not provided,
an internal TracerProvider will be used.
+
+ Parameters
+ ----------
+ base_url: Optional base URL for the API
+ environment: The environment to use (default: DEFAULT)
+ api_key: Your Humanloop API key (default: from HUMANLOOP_API_KEY env var)
+ timeout: Optional timeout for API requests
+ follow_redirects: Whether to follow redirects
+ httpx_client: Optional custom httpx client
+ opentelemetry_tracer_provider: Optional tracer provider for telemetry
+ opentelemetry_tracer: Optional tracer for telemetry
+ use_local_files: Whether to use local files for prompts and agents
+ local_files_directory: Base directory where local prompt and agent files are stored (default: "humanloop").
+ This is relative to the current working directory. For example:
+ - "humanloop" will look for files in "./humanloop/"
+ - "data/humanloop" will look for files in "./data/humanloop/"
+ When using paths in the API, they must be relative to this directory. For example,
+ if local_files_directory="humanloop" and you have a file at "humanloop/samples/test.prompt",
+ you would reference it as "samples/test" in your code.
+ cache_size: Maximum number of files to cache when use_local_files is True (default: DEFAULT_CACHE_SIZE).
+ This parameter has no effect if use_local_files is False.
"""
super().__init__(
base_url=base_url,
@@ -121,6 +150,17 @@ def __init__(
httpx_client=httpx_client,
)
+ self.use_local_files = use_local_files
+
+ # Warn if a non-default cache_size is set while use_local_files is False, since it has no effect and will be ignored
+ if not self.use_local_files and cache_size != DEFAULT_CACHE_SIZE:
+ logger.warning(
+ f"The specified cache_size={cache_size} will have no effect because use_local_files=False. "
+ f"File caching is only active when local files are enabled."
+ )
+
+ # The sync client backs both pull() and local file lookups when use_local_files is True
+ self._sync_client = SyncClient(client=self, base_dir=local_files_directory, cache_size=cache_size)
eval_client = ExtendedEvalsClient(client_wrapper=self._client_wrapper)
eval_client.client = self
self.evaluations = eval_client
@@ -128,10 +168,14 @@ def __init__(
# Overload the .log method of the clients to be aware of Evaluation Context
# and the @flow decorator providing the trace_id
- self.prompts = overload_log(client=self.prompts)
- self.prompts = overload_call(client=self.prompts)
- self.flows = overload_log(client=self.flows)
- self.tools = overload_log(client=self.tools)
+ self.prompts = overload_client(
+ client=self.prompts, sync_client=self._sync_client, use_local_files=self.use_local_files
+ )
+ self.agents = overload_client(
+ client=self.agents, sync_client=self._sync_client, use_local_files=self.use_local_files
+ )
+ self.flows = overload_client(client=self.flows)
+ self.tools = overload_client(client=self.tools)
if opentelemetry_tracer_provider is not None:
self._tracer_provider = opentelemetry_tracer_provider
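A minimal sketch of the new constructor options; the local file `./humanloop/samples/test.prompt` is the illustrative path from the docstring above and is assumed to have been pulled already:

```python
from humanloop import Humanloop

# Resolve Prompt/Agent definitions from ./humanloop/ instead of fetching
# them from the workspace on every call.
client = Humanloop(
    api_key="YOUR_API_KEY",
    use_local_files=True,
    local_files_directory="humanloop",  # default; files live under ./humanloop/
)

# Paths are relative to local_files_directory and omit the extension:
# ./humanloop/samples/test.prompt is referenced as "samples/test".
response = client.prompts.call(
    path="samples/test",
    messages=[{"role": "user", "content": "Hello"}],
)
```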
@@ -351,6 +395,53 @@ def agent():
attributes=attributes,
)
+ def pull(self, path: Optional[str] = None, environment: Optional[str] = None) -> Tuple[List[str], List[str]]:
+ """Pull Prompt and Agent files from Humanloop to local filesystem.
+
+ This method will:
+ 1. Fetch Prompt and Agent files from your Humanloop workspace
+ 2. Save them to your local filesystem (directory specified by `local_files_directory`, default: "humanloop")
+ 3. Maintain the same directory structure as in Humanloop
+ 4. Add appropriate file extensions (`.prompt` or `.agent`)
+
+ The path parameter is interpreted as follows:
+ - If it points to a specific file (e.g. "path/to/file.prompt" or "path/to/file.agent"), only that file will be pulled
+ - If it points to a directory (e.g. "path/to/directory"), all Prompt and Agent files in that directory and its subdirectories will be pulled
+ - If no path is provided, all Prompt and Agent files will be pulled
+
+ The operation will overwrite existing files with the latest version from Humanloop
+ but will not delete local files that don't exist in the remote workspace.
+
+ Currently only supports syncing Prompt and Agent files. Other file types will be skipped.
+
+ For example, with the default `local_files_directory="humanloop"`, files will be saved as:
+ ```
+ ./humanloop/
+ ├── my_project/
+ │ ├── prompts/
+ │ │ ├── my_prompt.prompt
+ │ │ └── nested/
+ │ │ └── another_prompt.prompt
+ │ └── agents/
+ │ └── my_agent.agent
+ └── another_project/
+ └── prompts/
+ └── other_prompt.prompt
+ ```
+
+ If you specify `local_files_directory="data/humanloop"`, files will be saved in ./data/humanloop/ instead.
+
+ :param path: Optional path to either a specific file (e.g. "path/to/file.prompt") or a directory (e.g. "path/to/directory").
+ If not provided, all Prompt and Agent files will be pulled.
+ :param environment: The environment to pull the files from.
+ :return: Tuple of two lists:
+ - First list contains paths of successfully synced files
+ - Second list contains paths of files that failed to sync (due to API errors, missing content,
+ or filesystem issues)
+ :raises HumanloopRuntimeError: If there's an error communicating with the API
+ """
+ return self._sync_client.pull(environment=environment, path=path)
+
class AsyncHumanloop(AsyncBaseHumanloop):
"""
diff --git a/src/humanloop/core/client_wrapper.py b/src/humanloop/core/client_wrapper.py
index 71036800..94cf9db0 100644
--- a/src/humanloop/core/client_wrapper.py
+++ b/src/humanloop/core/client_wrapper.py
@@ -14,10 +14,10 @@ def __init__(self, *, api_key: str, base_url: str, timeout: typing.Optional[floa
def get_headers(self) -> typing.Dict[str, str]:
headers: typing.Dict[str, str] = {
- "User-Agent": "humanloop/0.8.36b1",
+ "User-Agent": "humanloop/0.8.36",
"X-Fern-Language": "Python",
"X-Fern-SDK-Name": "humanloop",
- "X-Fern-SDK-Version": "0.8.36b1",
+ "X-Fern-SDK-Version": "0.8.36",
}
headers["X-API-KEY"] = self.api_key
return headers
diff --git a/src/humanloop/datasets/client.py b/src/humanloop/datasets/client.py
index 0e70c39c..8ab493a3 100644
--- a/src/humanloop/datasets/client.py
+++ b/src/humanloop/datasets/client.py
@@ -3,7 +3,7 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
from .raw_client import RawDatasetsClient
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
from ..core.pagination import SyncPager
@@ -55,7 +55,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[DatasetResponse]:
@@ -76,7 +76,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Datasets by
order : typing.Optional[SortOrder]
@@ -857,7 +857,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[DatasetResponse]:
@@ -878,7 +878,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Dataset. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Datasets by
order : typing.Optional[SortOrder]
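The `ProjectSortBy` to `FileSortBy` swap is a type-annotation rename; call sites keep passing the same string literals. A minimal sketch reusing the client constructed above, assuming the enum values carry over unchanged:

```python
# sort_by is now annotated as FileSortBy but is used exactly as before
for dataset in client.datasets.list(sort_by="created_at", order="desc", size=10):
    print(dataset.path)
```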
diff --git a/src/humanloop/evaluators/client.py b/src/humanloop/evaluators/client.py
index 88ec32c8..7fb13a71 100644
--- a/src/humanloop/evaluators/client.py
+++ b/src/humanloop/evaluators/client.py
@@ -10,7 +10,7 @@
from .requests.create_evaluator_log_request_spec import CreateEvaluatorLogRequestSpecParams
from ..core.request_options import RequestOptions
from ..types.create_evaluator_log_response import CreateEvaluatorLogResponse
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.pagination import SyncPager
from ..types.evaluator_response import EvaluatorResponse
@@ -234,7 +234,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[EvaluatorResponse]:
@@ -255,7 +255,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Evaluators by
order : typing.Optional[SortOrder]
@@ -1032,7 +1032,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[EvaluatorResponse]:
@@ -1053,7 +1053,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Evaluator. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Evaluators by
order : typing.Optional[SortOrder]
diff --git a/src/humanloop/files/client.py b/src/humanloop/files/client.py
index 693b46cb..b4fd4274 100644
--- a/src/humanloop/files/client.py
+++ b/src/humanloop/files/client.py
@@ -4,7 +4,7 @@
from ..core.client_wrapper import SyncClientWrapper
from .raw_client import RawFilesClient
from ..types.file_type import FileType
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
from ..types.paginated_data_union_prompt_response_tool_response_dataset_response_evaluator_response_flow_response_agent_response import (
@@ -39,11 +39,13 @@ def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
@@ -60,6 +62,9 @@ def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -69,12 +74,15 @@ def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -96,11 +104,13 @@ def list_files(
page=page,
size=size,
name=name,
+ path=path,
template=template,
type=type,
environment=environment,
sort_by=sort_by,
order=order,
+ include_raw_file_content=include_raw_file_content,
request_options=request_options,
)
return response.data
@@ -110,6 +120,7 @@ def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> RetrieveByPathFilesRetrieveByPathPostResponse:
"""
@@ -123,6 +134,9 @@ def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -143,7 +157,10 @@ def retrieve_by_path(
)
"""
response = self._raw_client.retrieve_by_path(
- path=path, environment=environment, request_options=request_options
+ path=path,
+ environment=environment,
+ include_raw_file_content=include_raw_file_content,
+ request_options=request_options,
)
return response.data
@@ -169,11 +186,13 @@ async def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse:
"""
@@ -190,6 +209,9 @@ async def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -199,12 +221,15 @@ async def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -234,11 +259,13 @@ async def main() -> None:
page=page,
size=size,
name=name,
+ path=path,
template=template,
type=type,
environment=environment,
sort_by=sort_by,
order=order,
+ include_raw_file_content=include_raw_file_content,
request_options=request_options,
)
return response.data
@@ -248,6 +275,7 @@ async def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> RetrieveByPathFilesRetrieveByPathPostResponse:
"""
@@ -261,6 +289,9 @@ async def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -289,6 +320,9 @@ async def main() -> None:
asyncio.run(main())
"""
response = await self._raw_client.retrieve_by_path(
- path=path, environment=environment, request_options=request_options
+ path=path,
+ environment=environment,
+ include_raw_file_content=include_raw_file_content,
+ request_options=request_options,
)
return response.data
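A sketch of the two new Files parameters working together (paths are illustrative; this assumes the paginated response exposes `records`, as Humanloop's paginated models do elsewhere):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

# Filter to a directory subtree and include the raw serialized contents
page = client.files.list_files(
    path="my_project/prompts",
    type="prompt",
    include_raw_file_content=True,
)
for record in page.records:
    print(record.path)

# The same flag is available on retrieve_by_path
file = client.files.retrieve_by_path(
    path="my_project/prompts/my_prompt",
    environment="production",
    include_raw_file_content=True,
)
```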
diff --git a/src/humanloop/files/raw_client.py b/src/humanloop/files/raw_client.py
index 01b48e03..02f371ac 100644
--- a/src/humanloop/files/raw_client.py
+++ b/src/humanloop/files/raw_client.py
@@ -3,7 +3,7 @@
import typing
from ..core.client_wrapper import SyncClientWrapper
from ..types.file_type import FileType
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.request_options import RequestOptions
from ..core.http_response import HttpResponse
@@ -33,11 +33,13 @@ def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
@@ -56,6 +58,9 @@ def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -65,12 +70,15 @@ def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -86,11 +94,13 @@ def list_files(
"page": page,
"size": size,
"name": name,
+ "path": path,
"template": template,
"type": type,
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_raw_file_content": include_raw_file_content,
},
request_options=request_options,
)
@@ -124,6 +134,7 @@ def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> HttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]:
"""
@@ -137,6 +148,9 @@ def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -150,6 +164,7 @@ def retrieve_by_path(
method="POST",
params={
"environment": environment,
+ "include_raw_file_content": include_raw_file_content,
},
json={
"path": path,
@@ -196,11 +211,13 @@ async def list_files(
page: typing.Optional[int] = None,
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
+ path: typing.Optional[str] = None,
template: typing.Optional[bool] = None,
type: typing.Optional[typing.Union[FileType, typing.Sequence[FileType]]] = None,
environment: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[
PaginatedDataUnionPromptResponseToolResponseDatasetResponseEvaluatorResponseFlowResponseAgentResponse
@@ -219,6 +236,9 @@ async def list_files(
name : typing.Optional[str]
Case-insensitive filter for file name.
+ path : typing.Optional[str]
+ Path of the directory to filter for. Returns files in this directory and all its subdirectories.
+
template : typing.Optional[bool]
Filter to include only template files.
@@ -228,12 +248,15 @@ async def list_files(
environment : typing.Optional[str]
Case-sensitive filter for files with a deployment in the specified environment. Requires the environment name.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort files by
order : typing.Optional[SortOrder]
Direction to sort by.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -249,11 +272,13 @@ async def list_files(
"page": page,
"size": size,
"name": name,
+ "path": path,
"template": template,
"type": type,
"environment": environment,
"sort_by": sort_by,
"order": order,
+ "include_raw_file_content": include_raw_file_content,
},
request_options=request_options,
)
@@ -287,6 +312,7 @@ async def retrieve_by_path(
*,
path: str,
environment: typing.Optional[str] = None,
+ include_raw_file_content: typing.Optional[bool] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncHttpResponse[RetrieveByPathFilesRetrieveByPathPostResponse]:
"""
@@ -300,6 +326,9 @@ async def retrieve_by_path(
environment : typing.Optional[str]
Name of the Environment to retrieve a deployed Version from.
+ include_raw_file_content : typing.Optional[bool]
+ Whether to include the raw file content in the response. Currently only supported for Agents and Prompts.
+
request_options : typing.Optional[RequestOptions]
Request-specific configuration.
@@ -313,6 +342,7 @@ async def retrieve_by_path(
method="POST",
params={
"environment": environment,
+ "include_raw_file_content": include_raw_file_content,
},
json={
"path": path,
diff --git a/src/humanloop/flows/client.py b/src/humanloop/flows/client.py
index bcb9491c..9ba82a6a 100644
--- a/src/humanloop/flows/client.py
+++ b/src/humanloop/flows/client.py
@@ -11,7 +11,7 @@
from ..types.create_flow_log_response import CreateFlowLogResponse
from ..types.flow_log_response import FlowLogResponse
from ..types.flow_response import FlowResponse
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.pagination import SyncPager
from ..types.paginated_data_flow_response import PaginatedDataFlowResponse
@@ -469,7 +469,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[FlowResponse]:
@@ -490,7 +490,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Flows by
order : typing.Optional[SortOrder]
@@ -1418,7 +1418,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[FlowResponse]:
@@ -1439,7 +1439,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Flow. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Flows by
order : typing.Optional[SortOrder]
diff --git a/src/humanloop/logs/client.py b/src/humanloop/logs/client.py
index 8733ed37..26cb2465 100644
--- a/src/humanloop/logs/client.py
+++ b/src/humanloop/logs/client.py
@@ -3,7 +3,6 @@
from ..core.client_wrapper import SyncClientWrapper
from .raw_client import RawLogsClient
import typing
-from ..types.version_status import VersionStatus
import datetime as dt
from ..core.request_options import RequestOptions
from ..core.pagination import SyncPager
diff --git a/src/humanloop/overload.py b/src/humanloop/overload.py
index b0c83215..92c83e6b 100644
--- a/src/humanloop/overload.py
+++ b/src/humanloop/overload.py
@@ -1,54 +1,69 @@
import inspect
import logging
import types
-from typing import TypeVar, Union
+from typing import Any, Callable, Dict, Optional, Tuple, Union
from humanloop.context import (
get_decorator_context,
get_evaluation_context,
get_trace_id,
)
-from humanloop.evals.run import HumanloopRuntimeError
-
-from humanloop.evaluators.client import EvaluatorsClient
-from humanloop.flows.client import FlowsClient
+from humanloop.error import HumanloopRuntimeError
+from humanloop.sync.sync_client import SyncClient
from humanloop.prompts.client import PromptsClient
+from humanloop.flows.client import FlowsClient
+from humanloop.datasets.client import DatasetsClient
+from humanloop.agents.client import AgentsClient
from humanloop.tools.client import ToolsClient
+from humanloop.evaluators.client import EvaluatorsClient
+from humanloop.types import FileType
from humanloop.types.create_evaluator_log_response import CreateEvaluatorLogResponse
from humanloop.types.create_flow_log_response import CreateFlowLogResponse
from humanloop.types.create_prompt_log_response import CreatePromptLogResponse
from humanloop.types.create_tool_log_response import CreateToolLogResponse
from humanloop.types.prompt_call_response import PromptCallResponse
+from humanloop.types.agent_call_response import AgentCallResponse
logger = logging.getLogger("humanloop.sdk")
-
-CLIENT_TYPE = TypeVar("CLIENT_TYPE", PromptsClient, FlowsClient, EvaluatorsClient, ToolsClient)
-
-
-def overload_log(client: CLIENT_TYPE) -> CLIENT_TYPE:
- """
- Wrap the `log` method of the provided Humanloop client to use EVALUATION_CONTEXT.
-
- This makes the overloaded log actions be aware of whether the created Log is
- part of an Evaluation (e.g. one started by eval_utils.run_eval).
- """
- # Copy the original log method in a hidden attribute
- client._log = client.log # type: ignore [attr-defined]
-
- def _overload_log(
- # It's safe to only consider kwargs since the original
- # log method bans positional arguments
- self,
- **kwargs,
- ) -> Union[
- CreatePromptLogResponse,
- CreateToolLogResponse,
- CreateFlowLogResponse,
- CreateEvaluatorLogResponse,
- ]:
- trace_id = get_trace_id()
- if trace_id is not None and type(client) is FlowsClient:
+LogResponseType = Union[
+ CreatePromptLogResponse,
+ CreateToolLogResponse,
+ CreateFlowLogResponse,
+ CreateEvaluatorLogResponse,
+]
+
+CallResponseType = Union[
+ PromptCallResponse,
+ AgentCallResponse,
+]
+
+
+def _get_file_type_from_client(
+ client: Union[PromptsClient, AgentsClient, ToolsClient, FlowsClient, DatasetsClient, EvaluatorsClient],
+) -> FileType:
+ """Get the file type based on the client type."""
+ if isinstance(client, PromptsClient):
+ return "prompt"
+ elif isinstance(client, AgentsClient):
+ return "agent"
+ elif isinstance(client, ToolsClient):
+ return "tool"
+ elif isinstance(client, FlowsClient):
+ return "flow"
+ elif isinstance(client, DatasetsClient):
+ return "dataset"
+ elif isinstance(client, EvaluatorsClient):
+ return "evaluator"
+
+ raise ValueError(f"Unsupported client type: {type(client)}")
+
+
+def _handle_tracing_context(kwargs: Dict[str, Any], client: Any) -> Dict[str, Any]:
+ """Handle tracing context for both log and call methods."""
+ trace_id = get_trace_id()
+ if trace_id is not None:
+ if "flow" in str(type(client).__name__).lower():
context = get_decorator_context()
if context is None:
raise HumanloopRuntimeError("Internal error: trace_id context is set outside a decorator context.")
@@ -56,69 +71,146 @@ def _overload_log(
f"Using `flows.log()` is not allowed: Flow decorator "
f"for File {context.path} manages the tracing and trace completion."
)
- if trace_id is not None:
- if "trace_parent_id" in kwargs:
- logger.warning(
- "Ignoring trace_parent_id argument at line %d: the Flow decorator manages tracing.",
- inspect.currentframe().f_lineno, # type: ignore [union-attr]
- )
- kwargs = {
- **kwargs,
- "trace_parent_id": trace_id,
- }
- evaluation_context = get_evaluation_context()
- if evaluation_context is not None:
- kwargs_eval, eval_callback = evaluation_context.log_args_with_context(
- path=kwargs.get("path"), log_args=kwargs
- )
- try:
- response = self._log(**kwargs_eval)
- except Exception as e:
- # Re-raising as HumanloopDecoratorError so the decorators don't catch it
- raise HumanloopRuntimeError from e
- if eval_callback is not None:
- eval_callback(response.id)
- else:
- try:
- response = self._log(**kwargs)
- except Exception as e:
- # Re-raising as HumanloopDecoratorError so the decorators don't catch it
- raise HumanloopRuntimeError from e
-
- return response
- # Replace the original log method with the overloaded one
- client.log = types.MethodType(_overload_log, client) # type: ignore [assignment]
- # Return the client with the overloaded log method
- logger.debug("Overloaded the .call method of %s", client)
- return client
+ if "trace_parent_id" in kwargs:
+ logger.warning(
+ "Ignoring trace_parent_id argument at line %d: the Flow decorator manages tracing.",
+ inspect.currentframe().f_lineno, # type: ignore[union-attr]
+ )
+ kwargs = {
+ **kwargs,
+ "trace_parent_id": trace_id,
+ }
+ return kwargs
+
+
+def _handle_local_files(
+ kwargs: Dict[str, Any],
+ client: Any,
+ sync_client: Optional[SyncClient],
+ use_local_files: bool,
+) -> Dict[str, Any]:
+ """Handle local file loading if enabled."""
+ if not use_local_files or "path" not in kwargs or sync_client is None:
+ return kwargs
+
+ if "id" in kwargs:
+ raise HumanloopRuntimeError("Can only specify one of `id` or `path`")
+
+ # Check if version_id or environment is specified
+ use_remote = any(["version_id" in kwargs, "environment" in kwargs])
+ normalized_path = sync_client._normalize_path(kwargs["path"])
+
+ if use_remote:
+ raise HumanloopRuntimeError(
+ f"Cannot use local file for `{normalized_path}` as version_id or environment was specified. "
+ "Please either remove version_id/environment to use local files, or set use_local_files=False to use remote files."
+ )
+
+ file_type = _get_file_type_from_client(client)
+ if file_type not in SyncClient.SERIALIZABLE_FILE_TYPES:
+ raise HumanloopRuntimeError(f"Local files are not supported for `{file_type}` files.")
+
+ # If the kernel (e.g. "prompt"/"agent") was passed directly as a params object rather than a raw file string, prefer it over the local file
+ if file_type in kwargs and not isinstance(kwargs[file_type], str):
+ logger.warning(
+ f"Ignoring local file for `{normalized_path}` as {file_type} parameters were directly provided. "
+ "Using provided parameters instead."
+ )
+ return kwargs
+
+ try:
+ file_content = sync_client.get_file_content(normalized_path, file_type) # type: ignore[arg-type] # file_type was checked above
+ kwargs[file_type] = file_content
+ except HumanloopRuntimeError as e:
+ raise HumanloopRuntimeError(f"Failed to use local file for `{normalized_path}`: {str(e)}")
+
+ return kwargs
+
+
+def _handle_evaluation_context(kwargs: Dict[str, Any]) -> Tuple[Dict[str, Any], Optional[Callable[[str], None]]]:
+ """Handle evaluation context for logging."""
+ evaluation_context = get_evaluation_context()
+ if evaluation_context is not None:
+ return evaluation_context.log_args_with_context(path=kwargs.get("path"), log_args=kwargs)
+ return kwargs, None
+
+
+def _overload_log(self: Any, sync_client: Optional[SyncClient], use_local_files: bool, **kwargs) -> LogResponseType:
+ try:
+ # Special handling for flows - prevent direct log usage
+ if type(self) is FlowsClient and get_trace_id() is not None:
+ context = get_decorator_context()
+ if context is None:
+ raise HumanloopRuntimeError("Internal error: trace_id context is set outside a decorator context.")
+ raise HumanloopRuntimeError(
+ f"Using `flows.log()` is not allowed: Flow decorator "
+ f"for File {context.path} manages the tracing and trace completion."
+ )
+ kwargs = _handle_tracing_context(kwargs, self)
-def overload_call(client: PromptsClient) -> PromptsClient:
- client._call = client.call # type: ignore [attr-defined]
-
- def _overload_call(self, **kwargs) -> PromptCallResponse:
- # None if not logging inside a decorator
- trace_id = get_trace_id()
- if trace_id is not None:
- if "trace_parent_id" in kwargs:
- logger.warning(
- "Ignoring trace_parent_id argument at line %d: the Flow decorator manages tracing.",
- inspect.currentframe().f_lineno, # type: ignore [union-attr]
- )
- kwargs = {
- **kwargs,
- "trace_parent_id": trace_id,
- }
-
- try:
- response = self._call(**kwargs)
- except Exception as e:
- # Re-raising as HumanloopDecoratorError so the decorators don't catch it
- raise HumanloopRuntimeError from e
+ # Handle local files for Prompts and Agents clients
+ if _get_file_type_from_client(self) in ["prompt", "agent"]:
+ if sync_client is None and use_local_files:
+ logger.error("sync_client is None but client has log method and use_local_files=%s", use_local_files)
+ raise HumanloopRuntimeError("sync_client is required for clients that support local file operations")
+ kwargs = _handle_local_files(kwargs, self, sync_client, use_local_files)
+ kwargs, eval_callback = _handle_evaluation_context(kwargs)
+ response = self._log(**kwargs) # Use stored original method
+ if eval_callback is not None:
+ eval_callback(response.id)
return response
+ except HumanloopRuntimeError:
+ # Re-raise HumanloopRuntimeError without wrapping to preserve the message
+ raise
+ except Exception as e:
+ # Only wrap non-HumanloopRuntimeError exceptions
+ raise HumanloopRuntimeError from e
+
+
+def _overload_call(self: Any, sync_client: Optional[SyncClient], use_local_files: bool, **kwargs) -> CallResponseType:
+ try:
+ kwargs = _handle_tracing_context(kwargs, self)
+ kwargs = _handle_local_files(kwargs, self, sync_client, use_local_files)
+ return self._call(**kwargs) # Use stored original method
+ except HumanloopRuntimeError:
+ # Re-raise HumanloopRuntimeError without wrapping to preserve the message
+ raise
+ except Exception as e:
+ # Only wrap non-HumanloopRuntimeError exceptions
+ raise HumanloopRuntimeError from e
+
+
+def overload_client(
+ client: Any,
+ sync_client: Optional[SyncClient] = None,
+ use_local_files: bool = False,
+) -> Any:
+ """Overloads client methods to add tracing, local file handling, and evaluation context."""
+ # Store original log method as _log for all clients. Used in flow decorator
+ if hasattr(client, "log") and not hasattr(client, "_log"):
+ client._log = client.log # type: ignore[attr-defined]
+
+ # Create a closure to capture sync_client and use_local_files
+ def log_wrapper(self: Any, **kwargs) -> LogResponseType:
+ return _overload_log(self, sync_client, use_local_files, **kwargs)
+
+ client.log = types.MethodType(log_wrapper, client)
+
+ # Overload call method for Prompt and Agent clients
+ if _get_file_type_from_client(client) in ["prompt", "agent"]:
+ if sync_client is None and use_local_files:
+ logger.error("sync_client is None but client has call method and use_local_files=%s", use_local_files)
+ raise HumanloopRuntimeError("sync_client is required for clients that support call operations")
+ if hasattr(client, "call") and not hasattr(client, "_call"):
+ client._call = client.call # type: ignore[attr-defined]
+
+ # Create a closure to capture sync_client and use_local_files
+ def call_wrapper(self: Any, **kwargs) -> CallResponseType:
+ return _overload_call(self, sync_client, use_local_files, **kwargs)
+
+ client.call = types.MethodType(call_wrapper, client)
- # Replace the original log method with the overloaded one
- client.call = types.MethodType(_overload_call, client) # type: ignore [assignment]
return client
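The effect of `overload_client` is easiest to see from the caller's side. A sketch of the intended behavior, assuming the top-level client accepts `use_local_files` at construction (the wiring below is illustrative, not the SDK's literal initialization code):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY", use_local_files=True)

# call() on the overloaded PromptsClient resolves the path against the
# local sync directory and sends the raw .prompt contents, rather than
# the remotely deployed version.
response = client.prompts.call(
    path="my_project/prompts/my_prompt",
    messages=[{"role": "user", "content": "Hello"}],
)

# Pinning a remote version while local files are enabled raises
# HumanloopRuntimeError, per _handle_local_files above:
# client.prompts.call(path="...", version_id="...")  # would raise
```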
diff --git a/src/humanloop/prompts/__init__.py b/src/humanloop/prompts/__init__.py
index ae141d57..557dcc5c 100644
--- a/src/humanloop/prompts/__init__.py
+++ b/src/humanloop/prompts/__init__.py
@@ -1,25 +1,33 @@
# This file was auto-generated by Fern from our API Definition.
from .types import (
+ PromptLogRequestPrompt,
PromptLogRequestToolChoice,
PromptLogUpdateRequestToolChoice,
PromptRequestReasoningEffort,
PromptRequestStop,
PromptRequestTemplate,
+ PromptsCallRequestPrompt,
PromptsCallRequestToolChoice,
+ PromptsCallStreamRequestPrompt,
PromptsCallStreamRequestToolChoice,
)
from .requests import (
+ PromptLogRequestPromptParams,
PromptLogRequestToolChoiceParams,
PromptLogUpdateRequestToolChoiceParams,
PromptRequestReasoningEffortParams,
PromptRequestStopParams,
PromptRequestTemplateParams,
+ PromptsCallRequestPromptParams,
PromptsCallRequestToolChoiceParams,
+ PromptsCallStreamRequestPromptParams,
PromptsCallStreamRequestToolChoiceParams,
)
__all__ = [
+ "PromptLogRequestPrompt",
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoice",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoice",
@@ -30,8 +38,12 @@
"PromptRequestStopParams",
"PromptRequestTemplate",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPrompt",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoice",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPrompt",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoice",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/client.py b/src/humanloop/prompts/client.py
index d5de327b..a81e8411 100644
--- a/src/humanloop/prompts/client.py
+++ b/src/humanloop/prompts/client.py
@@ -5,7 +5,7 @@
from .raw_client import RawPromptsClient
from ..requests.chat_message import ChatMessageParams
from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -13,11 +13,13 @@
from .requests.prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
from ..types.log_response import LogResponse
from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.prompt_call_stream_response import PromptCallStreamResponse
from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
from ..types.prompt_call_response import PromptCallResponse
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.pagination import SyncPager
from ..types.prompt_response import PromptResponse
@@ -85,7 +87,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -166,8 +168,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -480,7 +485,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -538,8 +543,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -649,7 +657,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -707,8 +715,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -843,7 +854,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[PromptResponse]:
@@ -864,7 +875,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Prompts by
order : typing.Optional[SortOrder]
@@ -1607,7 +1618,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize a Prompt to the .prompt file format.
@@ -1633,7 +1644,8 @@ def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
@@ -1719,7 +1731,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1800,8 +1812,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -2131,7 +2146,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2189,8 +2204,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2309,7 +2327,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2367,8 +2385,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2511,7 +2532,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[PromptResponse]:
@@ -2532,7 +2553,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Prompt. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Prompts by
order : typing.Optional[SortOrder]
@@ -3379,7 +3400,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> None:
+ ) -> str:
"""
Serialize a Prompt to the .prompt file format.
@@ -3405,7 +3426,8 @@ async def serialize(
Returns
-------
- None
+ str
+ Successful Response
Examples
--------
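With the widened `prompt` parameter, the raw contents of a `.prompt` file can be passed directly where a kernel object was previously required. A minimal sketch (the file path is illustrative):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

with open("humanloop/my_project/prompts/my_prompt.prompt") as f:
    raw_prompt = f.read()

# `prompt` now accepts either a PromptKernelRequestParams object or a raw string
response = client.prompts.call(
    path="my_project/prompts/my_prompt",
    prompt=raw_prompt,
    messages=[{"role": "user", "content": "Hello"}],
)
```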
diff --git a/src/humanloop/prompts/raw_client.py b/src/humanloop/prompts/raw_client.py
index 2b907d91..d7346742 100644
--- a/src/humanloop/prompts/raw_client.py
+++ b/src/humanloop/prompts/raw_client.py
@@ -4,7 +4,7 @@
from ..core.client_wrapper import SyncClientWrapper
from ..requests.chat_message import ChatMessageParams
from .requests.prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
-from ..requests.prompt_kernel_request import PromptKernelRequestParams
+from .requests.prompt_log_request_prompt import PromptLogRequestPromptParams
import datetime as dt
from ..types.log_status import LogStatus
from ..core.request_options import RequestOptions
@@ -20,11 +20,13 @@
from ..types.log_response import LogResponse
from ..core.jsonable_encoder import jsonable_encoder
from .requests.prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
+from .requests.prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from ..requests.provider_api_keys import ProviderApiKeysParams
from ..types.prompt_call_stream_response import PromptCallStreamResponse
import httpx_sse
import contextlib
from .requests.prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .requests.prompts_call_request_prompt import PromptsCallRequestPromptParams
from ..types.prompt_call_response import PromptCallResponse
from ..types.model_endpoints import ModelEndpoints
from .requests.prompt_request_template import PromptRequestTemplateParams
@@ -73,7 +75,7 @@ def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -154,8 +156,11 @@ def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -249,7 +254,7 @@ def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -496,7 +501,7 @@ def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -554,8 +559,11 @@ def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -633,7 +641,7 @@ def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -706,7 +714,7 @@ def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -764,8 +772,11 @@ def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -843,7 +854,7 @@ def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -1754,7 +1765,7 @@ def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> HttpResponse[None]:
+ ) -> HttpResponse[str]:
"""
Serialize a Prompt to the .prompt file format.
@@ -1780,7 +1791,8 @@ def serialize(
Returns
-------
- HttpResponse[None]
+ HttpResponse[str]
+ Successful Response
"""
_response = self._client_wrapper.httpx_client.request(
f"prompts/{jsonable_encoder(id)}/serialize",
@@ -1793,7 +1805,7 @@ def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return HttpResponse(response=_response, data=None)
+ return HttpResponse(response=_response, data=_response.text)
if _response.status_code == 422:
raise UnprocessableEntityError(
typing.cast(
@@ -1889,7 +1901,7 @@ async def log(
finish_reason: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptLogRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptLogRequestPromptParams] = OMIT,
start_time: typing.Optional[dt.datetime] = OMIT,
end_time: typing.Optional[dt.datetime] = OMIT,
output: typing.Optional[str] = OMIT,
@@ -1970,8 +1982,11 @@ async def log(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptLogRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
start_time : typing.Optional[dt.datetime]
When the logged event started.
@@ -2065,7 +2080,7 @@ async def log(
object_=tool_choice, annotation=PromptLogRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptLogRequestPromptParams, direction="write"
),
"start_time": start_time,
"end_time": end_time,
@@ -2312,7 +2327,7 @@ async def call_stream(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallStreamRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallStreamRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2370,8 +2385,11 @@ async def call_stream(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallStreamRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2449,7 +2467,7 @@ async def call_stream(
object_=tool_choice, annotation=PromptsCallStreamRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallStreamRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -2522,7 +2540,7 @@ async def call(
id: typing.Optional[str] = OMIT,
messages: typing.Optional[typing.Sequence[ChatMessageParams]] = OMIT,
tool_choice: typing.Optional[PromptsCallRequestToolChoiceParams] = OMIT,
- prompt: typing.Optional[PromptKernelRequestParams] = OMIT,
+ prompt: typing.Optional[PromptsCallRequestPromptParams] = OMIT,
inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
source: typing.Optional[str] = OMIT,
metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = OMIT,
@@ -2580,8 +2598,11 @@ async def call(
- `'required'` means the model must call one or more of the provided tools.
- `{'type': 'function', 'function': {'name': <TOOL_NAME>}}` forces the model to use the named function.
- prompt : typing.Optional[PromptKernelRequestParams]
- Details of your Prompt. A new Prompt version will be created if the provided details are new.
+ prompt : typing.Optional[PromptsCallRequestPromptParams]
+ The Prompt configuration to use. Two formats are supported:
+ - An object representing the details of the Prompt configuration
+ - A string representing the raw contents of a .prompt file
+ A new Prompt version will be created if the provided details do not match any existing version.
inputs : typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]]
The inputs passed to the prompt template.
@@ -2659,7 +2680,7 @@ async def call(
object_=tool_choice, annotation=PromptsCallRequestToolChoiceParams, direction="write"
),
"prompt": convert_and_respect_annotation_metadata(
- object_=prompt, annotation=PromptKernelRequestParams, direction="write"
+ object_=prompt, annotation=PromptsCallRequestPromptParams, direction="write"
),
"inputs": inputs,
"source": source,
@@ -3572,7 +3593,7 @@ async def serialize(
version_id: typing.Optional[str] = None,
environment: typing.Optional[str] = None,
request_options: typing.Optional[RequestOptions] = None,
- ) -> AsyncHttpResponse[None]:
+ ) -> AsyncHttpResponse[str]:
"""
Serialize a Prompt to the .prompt file format.
@@ -3598,7 +3619,8 @@ async def serialize(
Returns
-------
- AsyncHttpResponse[None]
+ AsyncHttpResponse[str]
+ Successful Response
"""
_response = await self._client_wrapper.httpx_client.request(
f"prompts/{jsonable_encoder(id)}/serialize",
@@ -3611,7 +3633,7 @@ async def serialize(
)
try:
if 200 <= _response.status_code < 300:
- return AsyncHttpResponse(response=_response, data=None)
+ return AsyncHttpResponse(response=_response, data=_response.text)
if _response.status_code == 422:
raise UnprocessableEntityError(
typing.cast(
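Since `serialize` now returns the `.prompt` text instead of `None`, it pairs naturally with the string-accepting `prompt` parameter above. A round-trip sketch (the id is a placeholder):

```python
from humanloop import Humanloop

client = Humanloop(api_key="YOUR_API_KEY")

raw = client.prompts.serialize(id="pr_XXXXXXXX", environment="production")

# Persist the serialized Prompt locally...
with open("my_prompt.prompt", "w") as f:
    f.write(raw)

# ...or feed it straight back as the prompt argument
client.prompts.call(
    path="my_project/prompts/my_prompt",
    prompt=raw,
    messages=[{"role": "user", "content": "Hi"}],
)
```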
diff --git a/src/humanloop/prompts/requests/__init__.py b/src/humanloop/prompts/requests/__init__.py
index 3971e252..ae1cfb6a 100644
--- a/src/humanloop/prompts/requests/__init__.py
+++ b/src/humanloop/prompts/requests/__init__.py
@@ -1,19 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .prompt_log_request_prompt import PromptLogRequestPromptParams
from .prompt_log_request_tool_choice import PromptLogRequestToolChoiceParams
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoiceParams
from .prompt_request_reasoning_effort import PromptRequestReasoningEffortParams
from .prompt_request_stop import PromptRequestStopParams
from .prompt_request_template import PromptRequestTemplateParams
+from .prompts_call_request_prompt import PromptsCallRequestPromptParams
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoiceParams
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPromptParams
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoiceParams
__all__ = [
+ "PromptLogRequestPromptParams",
"PromptLogRequestToolChoiceParams",
"PromptLogUpdateRequestToolChoiceParams",
"PromptRequestReasoningEffortParams",
"PromptRequestStopParams",
"PromptRequestTemplateParams",
+ "PromptsCallRequestPromptParams",
"PromptsCallRequestToolChoiceParams",
+ "PromptsCallStreamRequestPromptParams",
"PromptsCallStreamRequestToolChoiceParams",
]
diff --git a/src/humanloop/prompts/requests/prompt_log_request_prompt.py b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
new file mode 100644
index 00000000..8473bb42
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompt_log_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptLogRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/requests/prompts_call_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
new file mode 100644
index 00000000..7a236235
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptsCallRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..9524425b
--- /dev/null
+++ b/src/humanloop/prompts/requests/prompts_call_stream_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...requests.prompt_kernel_request import PromptKernelRequestParams
+
+PromptsCallStreamRequestPromptParams = typing.Union[PromptKernelRequestParams, str]
diff --git a/src/humanloop/prompts/types/__init__.py b/src/humanloop/prompts/types/__init__.py
index 1b849e7d..40326bce 100644
--- a/src/humanloop/prompts/types/__init__.py
+++ b/src/humanloop/prompts/types/__init__.py
@@ -1,19 +1,25 @@
# This file was auto-generated by Fern from our API Definition.
+from .prompt_log_request_prompt import PromptLogRequestPrompt
from .prompt_log_request_tool_choice import PromptLogRequestToolChoice
from .prompt_log_update_request_tool_choice import PromptLogUpdateRequestToolChoice
from .prompt_request_reasoning_effort import PromptRequestReasoningEffort
from .prompt_request_stop import PromptRequestStop
from .prompt_request_template import PromptRequestTemplate
+from .prompts_call_request_prompt import PromptsCallRequestPrompt
from .prompts_call_request_tool_choice import PromptsCallRequestToolChoice
+from .prompts_call_stream_request_prompt import PromptsCallStreamRequestPrompt
from .prompts_call_stream_request_tool_choice import PromptsCallStreamRequestToolChoice
__all__ = [
+ "PromptLogRequestPrompt",
"PromptLogRequestToolChoice",
"PromptLogUpdateRequestToolChoice",
"PromptRequestReasoningEffort",
"PromptRequestStop",
"PromptRequestTemplate",
+ "PromptsCallRequestPrompt",
"PromptsCallRequestToolChoice",
+ "PromptsCallStreamRequestPrompt",
"PromptsCallStreamRequestToolChoice",
]
diff --git a/src/humanloop/prompts/types/prompt_log_request_prompt.py b/src/humanloop/prompts/types/prompt_log_request_prompt.py
new file mode 100644
index 00000000..4a0791dc
--- /dev/null
+++ b/src/humanloop/prompts/types/prompt_log_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptLogRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompts_call_request_prompt.py b/src/humanloop/prompts/types/prompts_call_request_prompt.py
new file mode 100644
index 00000000..78a9f5a1
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptsCallRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
new file mode 100644
index 00000000..71376823
--- /dev/null
+++ b/src/humanloop/prompts/types/prompts_call_stream_request_prompt.py
@@ -0,0 +1,6 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+from ...types.prompt_kernel_request import PromptKernelRequest
+
+PromptsCallStreamRequestPrompt = typing.Union[PromptKernelRequest, str]
diff --git a/src/humanloop/requests/agent_continue_response.py b/src/humanloop/requests/agent_continue_response.py
deleted file mode 100644
index 8300667b..00000000
--- a/src/humanloop/requests/agent_continue_response.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing_extensions
-import typing_extensions
-from .chat_message import ChatMessageParams
-import typing
-from .agent_continue_response_tool_choice import AgentContinueResponseToolChoiceParams
-from .agent_response import AgentResponseParams
-import datetime as dt
-from ..types.log_status import LogStatus
-from .evaluator_log_response import EvaluatorLogResponseParams
-from .log_response import LogResponseParams
-
-
-class AgentContinueResponseParams(typing_extensions.TypedDict):
- """
- Response model for continuing an Agent call.
- """
-
- output_message: typing_extensions.NotRequired[ChatMessageParams]
- """
- The message returned by the provider.
- """
-
- prompt_tokens: typing_extensions.NotRequired[int]
- """
- Number of tokens in the prompt used to generate the output.
- """
-
- reasoning_tokens: typing_extensions.NotRequired[int]
- """
- Number of reasoning tokens used to generate the output.
- """
-
- output_tokens: typing_extensions.NotRequired[int]
- """
- Number of tokens in the output generated by the model.
- """
-
- prompt_cost: typing_extensions.NotRequired[float]
- """
- Cost in dollars associated to the tokens in the prompt.
- """
-
- output_cost: typing_extensions.NotRequired[float]
- """
- Cost in dollars associated to the tokens in the output.
- """
-
- finish_reason: typing_extensions.NotRequired[str]
- """
- Reason the generation finished.
- """
-
- messages: typing_extensions.NotRequired[typing.Sequence[ChatMessageParams]]
- """
- The messages passed to the to provider chat endpoint.
- """
-
- tool_choice: typing_extensions.NotRequired[AgentContinueResponseToolChoiceParams]
- """
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- """
-
- agent: AgentResponseParams
- """
- Agent that generated the Log.
- """
-
- start_time: typing_extensions.NotRequired[dt.datetime]
- """
- When the logged event started.
- """
-
- end_time: typing_extensions.NotRequired[dt.datetime]
- """
- When the logged event ended.
- """
-
- output: typing_extensions.NotRequired[str]
- """
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- """
-
- created_at: typing_extensions.NotRequired[dt.datetime]
- """
- User defined timestamp for when the log was created.
- """
-
- error: typing_extensions.NotRequired[str]
- """
- Error message if the log is an error.
- """
-
- provider_latency: typing_extensions.NotRequired[float]
- """
- Duration of the logged event in seconds.
- """
-
- stdout: typing_extensions.NotRequired[str]
- """
- Captured log and debug statements.
- """
-
- provider_request: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- Raw request sent to provider.
- """
-
- provider_response: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- Raw response received the provider.
- """
-
- inputs: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- The inputs passed to the prompt template.
- """
-
- source: typing_extensions.NotRequired[str]
- """
- Identifies where the model was called from.
- """
-
- metadata: typing_extensions.NotRequired[typing.Dict[str, typing.Optional[typing.Any]]]
- """
- Any additional metadata to record.
- """
-
- log_status: typing_extensions.NotRequired[LogStatus]
- """
- Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
- """
-
- source_datapoint_id: typing_extensions.NotRequired[str]
- """
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- """
-
- trace_parent_id: typing_extensions.NotRequired[str]
- """
- The ID of the parent Log to nest this Log under in a Trace.
- """
-
- batches: typing_extensions.NotRequired[typing.Sequence[str]]
- """
- Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
- """
-
- user: typing_extensions.NotRequired[str]
- """
- End-user ID related to the Log.
- """
-
- environment: typing_extensions.NotRequired[str]
- """
- The name of the Environment the Log is associated to.
- """
-
- save: typing_extensions.NotRequired[bool]
- """
- Whether the request/response payloads will be stored on Humanloop.
- """
-
- log_id: typing_extensions.NotRequired[str]
- """
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- """
-
- id: str
- """
- Unique identifier for the Log.
- """
-
- evaluator_logs: typing.Sequence[EvaluatorLogResponseParams]
- """
- List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
- """
-
- trace_flow_id: typing_extensions.NotRequired[str]
- """
- Identifier for the Flow that the Trace belongs to.
- """
-
- trace_id: typing_extensions.NotRequired[str]
- """
- Identifier for the Trace that the Log belongs to.
- """
-
- trace_children: typing_extensions.NotRequired[typing.Sequence[LogResponseParams]]
- """
- Logs nested under this Log in the Trace.
- """
-
- previous_agent_message: typing_extensions.NotRequired[ChatMessageParams]
- """
- The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
- """
diff --git a/src/humanloop/requests/agent_continue_response_tool_choice.py b/src/humanloop/requests/agent_continue_response_tool_choice.py
deleted file mode 100644
index 24b044cc..00000000
--- a/src/humanloop/requests/agent_continue_response_tool_choice.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .tool_choice import ToolChoiceParams
-
-AgentContinueResponseToolChoiceParams = typing.Union[
- typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoiceParams
-]
diff --git a/src/humanloop/requests/agent_continue_stream_response.py b/src/humanloop/requests/agent_continue_stream_response.py
deleted file mode 100644
index 1038e000..00000000
--- a/src/humanloop/requests/agent_continue_stream_response.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing_extensions
-import typing_extensions
-from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayloadParams
-from ..types.event_type import EventType
-import datetime as dt
-
-
-class AgentContinueStreamResponseParams(typing_extensions.TypedDict):
- """
- Response model for continuing an Agent call in streaming mode.
- """
-
- log_id: str
- message: str
- payload: typing_extensions.NotRequired[AgentContinueStreamResponsePayloadParams]
- type: EventType
- created_at: dt.datetime
diff --git a/src/humanloop/requests/agent_continue_stream_response_payload.py b/src/humanloop/requests/agent_continue_stream_response_payload.py
deleted file mode 100644
index ddd74c10..00000000
--- a/src/humanloop/requests/agent_continue_stream_response_payload.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .log_stream_response import LogStreamResponseParams
-from .log_response import LogResponseParams
-from .tool_call import ToolCallParams
-
-AgentContinueStreamResponsePayloadParams = typing.Union[LogStreamResponseParams, LogResponseParams, ToolCallParams]
diff --git a/src/humanloop/requests/agent_response.py b/src/humanloop/requests/agent_response.py
index 047904a7..08303ec0 100644
--- a/src/humanloop/requests/agent_response.py
+++ b/src/humanloop/requests/agent_response.py
@@ -235,3 +235,8 @@ class AgentResponseParams(typing_extensions.TypedDict):
"""
Aggregation of Evaluator results for the Agent Version.
"""
+
+ raw_file_content: typing_extensions.NotRequired[str]
+ """
+ The raw content of the Agent. Corresponds to the .agent file.
+ """
diff --git a/src/humanloop/requests/populate_template_response.py b/src/humanloop/requests/populate_template_response.py
index 491cacd3..09f68797 100644
--- a/src/humanloop/requests/populate_template_response.py
+++ b/src/humanloop/requests/populate_template_response.py
@@ -218,6 +218,11 @@ class PopulateTemplateResponseParams(typing_extensions.TypedDict):
Aggregation of Evaluator results for the Prompt Version.
"""
+ raw_file_content: typing_extensions.NotRequired[str]
+ """
+ The raw content of the Prompt. Corresponds to the .prompt file.
+ """
+
populated_template: typing_extensions.NotRequired[PopulateTemplateResponsePopulatedTemplateParams]
"""
The template populated with the input values you provided in the request. Returns None if no template exists.
diff --git a/src/humanloop/requests/prompt_response.py b/src/humanloop/requests/prompt_response.py
index b6ff03df..1918f87b 100644
--- a/src/humanloop/requests/prompt_response.py
+++ b/src/humanloop/requests/prompt_response.py
@@ -220,3 +220,8 @@ class PromptResponseParams(typing_extensions.TypedDict):
"""
Aggregation of Evaluator results for the Prompt Version.
"""
+
+ raw_file_content: typing_extensions.NotRequired[str]
+ """
+ The raw content of the Prompt. Corresponds to the .prompt file.
+ """
diff --git a/src/humanloop/sync/__init__.py b/src/humanloop/sync/__init__.py
new file mode 100644
index 00000000..007659df
--- /dev/null
+++ b/src/humanloop/sync/__init__.py
@@ -0,0 +1,3 @@
+from humanloop.sync.sync_client import SyncClient
+
+__all__ = ["SyncClient"]
diff --git a/src/humanloop/sync/sync_client.py b/src/humanloop/sync/sync_client.py
new file mode 100644
index 00000000..d71f1568
--- /dev/null
+++ b/src/humanloop/sync/sync_client.py
@@ -0,0 +1,374 @@
+import logging
+from pathlib import Path
+from typing import List, Optional, Tuple, TYPE_CHECKING
+from functools import lru_cache
+import typing
+import time
+from humanloop.error import HumanloopRuntimeError
+import json
+
+if TYPE_CHECKING:
+ from humanloop.base_client import BaseHumanloop
+
+# Set up logging; only attach a handler if none is configured yet
+logger = logging.getLogger("humanloop.sdk.sync")
+logger.setLevel(logging.INFO)
+if not logger.hasHandlers():
+    console_handler = logging.StreamHandler()
+    console_handler.setFormatter(logging.Formatter("%(message)s"))
+    logger.addHandler(console_handler)
+
+# Default cache size for file content caching
+DEFAULT_CACHE_SIZE = 100
+
+
+def format_api_error(error: Exception) -> str:
+ """Format API error messages to be more user-friendly."""
+ error_msg = str(error)
+ if "status_code" not in error_msg or "body" not in error_msg:
+ return error_msg
+
+ try:
+ # Extract the body part and parse as JSON
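+        # The message is expected to look like (illustrative):
+        #     "status_code: 422, body: {'detail': {'msg': 'Something went wrong'}}"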
+ body_str = error_msg.split("body: ")[1]
+ # Convert Python dict string to valid JSON by:
+ # 1. Escaping double quotes
+ # 2. Replacing single quotes with double quotes
+ body_str = body_str.replace('"', '\\"').replace("'", '"')
+ body = json.loads(body_str)
+
+ # Get the detail from the body
+ detail = body.get("detail", {})
+
+ # Handle both string and dictionary types for detail
+ if isinstance(detail, str):
+ return detail
+ elif isinstance(detail, dict):
+ return detail.get("description") or detail.get("msg") or error_msg
+ else:
+ return error_msg
+ except Exception as e:
+ logger.debug(f"Failed to parse error message: {str(e)}")
+ return error_msg
+
+
+SerializableFileType = typing.Literal["prompt", "agent"]
+
+
+class SyncClient:
+ """Client for managing synchronization between local filesystem and Humanloop.
+
+ This client provides file synchronization between Humanloop and the local filesystem,
+ with built-in caching for improved performance. The cache uses Python's LRU (Least
+ Recently Used) cache to automatically manage memory usage by removing least recently
+ accessed files when the cache is full.
+
+ The cache is automatically updated when files are pulled or saved, and can be
+ manually cleared using the clear_cache() method.
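+
+    Example (a minimal sketch; assumes a Humanloop client configured with an API key,
+    and a hypothetical "prompts/my-prompt" file):
+
+        from humanloop import Humanloop
+
+        client = Humanloop(api_key="...")
+        sync = SyncClient(client, base_dir="humanloop")
+        succeeded, failed = sync.pull(path="prompts")
+        content = sync.get_file_content("prompts/my-prompt", "prompt")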
+ """
+
+ # File types that can be serialized to/from the filesystem
+ SERIALIZABLE_FILE_TYPES = frozenset(typing.get_args(SerializableFileType))
+
+ def __init__(
+ self,
+ client: "BaseHumanloop",
+ base_dir: str = "humanloop",
+ cache_size: int = DEFAULT_CACHE_SIZE,
+ log_level: int = logging.WARNING,
+ ):
+ """
+ Parameters
+ ----------
+ client: Humanloop client instance
+ base_dir: Base directory for synced files (default: "humanloop")
+ cache_size: Maximum number of files to cache (default: DEFAULT_CACHE_SIZE)
+ log_level: Log level for logging (default: WARNING)
+ """
+ self.client = client
+ self.base_dir = Path(base_dir)
+ self._cache_size = cache_size
+
+ logger.setLevel(log_level)
+
+        # Wrap the implementation in an LRU cache of the requested size; the public
+        # get_file_content method delegates to this cached callable.
+        self._cached_get_file_content = lru_cache(maxsize=cache_size)(
+            self._get_file_content_implementation,
+        )
+
+ def _get_file_content_implementation(self, path: str, file_type: SerializableFileType) -> str:
+ """Implementation of get_file_content without the cache decorator.
+
+ This is the actual implementation that gets wrapped by lru_cache.
+
+ Args:
+ path: The normalized path to the file (without extension)
+ file_type: The type of file to get the content of (SerializableFileType)
+
+ Returns:
+ The raw file content
+
+ Raises:
+ HumanloopRuntimeError: In two cases:
+ 1. If the file doesn't exist at the expected location
+ 2. If there's a filesystem error when trying to read the file
+ (e.g., permission denied, file is locked, etc.)
+ """
+ # Construct path to local file
+ local_path = self.base_dir / path
+ # Add appropriate extension
+ local_path = local_path.parent / f"{local_path.stem}.{file_type}"
+
+ if not local_path.exists():
+ raise HumanloopRuntimeError(f"Local file not found: {local_path}")
+
+ try:
+ # Read the raw file content
+ with open(local_path) as f:
+ file_content = f.read()
+ logger.debug(f"Using local file content from {local_path}")
+ return file_content
+ except Exception as e:
+ raise HumanloopRuntimeError(f"Error reading local file {local_path}: {str(e)}")
+
+ def get_file_content(self, path: str, file_type: SerializableFileType) -> str:
+ """Get the raw file content of a file from cache or filesystem.
+
+ This method uses an LRU cache to store file contents. When the cache is full,
+ the least recently accessed files are automatically removed to make space.
+
+ Args:
+ path: The normalized path to the file (without extension)
+ file_type: The type of file (Prompt or Agent)
+
+ Returns:
+ The raw file content
+
+ Raises:
+ HumanloopRuntimeError: If the file doesn't exist or can't be read
+ """
+        return self._cached_get_file_content(path, file_type)
+
+ def clear_cache(self) -> None:
+ """Clear the LRU cache."""
+        self._cached_get_file_content.cache_clear()
+
+ def _normalize_path(self, path: str) -> str:
+ """Normalize the path by:
+ 1. Converting to a Path object to handle platform-specific separators
+ 2. Removing any file extensions
+ 3. Converting to a string with forward slashes and no leading/trailing slashes
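+
+        For example (illustrative), "prompts\\marketing.prompt" normalizes to
+        "prompts/marketing".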
+ """
+ # Convert to Path object to handle platform-specific separators
+ path_obj = Path(path)
+
+ # Reject absolute paths to ensure all paths are relative to base_dir.
+ # This maintains consistency with the remote filesystem where paths are relative to project root.
+ if path_obj.is_absolute():
+ raise HumanloopRuntimeError(
+ f"Absolute paths are not supported: `{path}`. "
+ f"Paths should be relative to the base directory (`{self.base_dir}`)."
+ )
+
+ # Remove extension, convert to string with forward slashes, and remove leading/trailing slashes
+ normalized = str(path_obj.with_suffix(""))
+ # Replace all backslashes and normalize multiple forward slashes
+ return "/".join(part for part in normalized.replace("\\", "/").split("/") if part)
+
+ def is_file(self, path: str) -> bool:
+ """Check if the path is a file by checking for .{file_type} extension for serializable file types."""
+ return path.endswith(tuple(f".{file_type}" for file_type in self.SERIALIZABLE_FILE_TYPES))
+
+ def _save_serialized_file(
+ self,
+ serialized_content: str,
+ file_path: str,
+ file_type: SerializableFileType,
+ ) -> None:
+ """Save serialized file to local filesystem."""
+ try:
+ # Create full path including base_dir prefix
+ full_path = self.base_dir / file_path
+ # Create directory if it doesn't exist
+ full_path.parent.mkdir(parents=True, exist_ok=True)
+
+ # Add file type extension
+ new_path = full_path.parent / f"{full_path.stem}.{file_type}"
+
+ # Write raw file content to file
+ with open(new_path, "w") as f:
+ f.write(serialized_content)
+ except Exception as e:
+ logger.error(f"Failed to write {file_type} {file_path} to disk: {str(e)}")
+ raise
+
+    def _pull_file(self, path: str, environment: Optional[str] = None) -> bool:
+ """Pull a specific file from Humanloop to local filesystem.
+
+ Returns:
+ True if the file was successfully pulled, False otherwise
+ """
+ try:
+ file = self.client.files.retrieve_by_path(
+ path=path,
+ environment=environment,
+ include_raw_file_content=True,
+ )
+
+ if file.type not in self.SERIALIZABLE_FILE_TYPES:
+ logger.error(f"Unsupported file type: {file.type}")
+ return False
+
+ if not file.raw_file_content: # type: ignore [union-attr]
+ logger.error(f"No content found for {file.type} {path}")
+ return False
+
+ self._save_serialized_file(
+ serialized_content=file.raw_file_content, # type: ignore [union-attr]
+ file_path=file.path,
+ file_type=typing.cast(SerializableFileType, file.type),
+ )
+ return True
+ except Exception as e:
+ logger.error(f"Failed to pull file {path}: {str(e)}")
+ return False
+
+ def _pull_directory(
+ self,
+        path: Optional[str] = None,
+        environment: Optional[str] = None,
+ ) -> Tuple[List[str], List[str]]:
+ """Sync Prompt and Agent files from Humanloop to local filesystem.
+
+ Returns:
+ Tuple of two lists:
+ - First list contains paths of successfully synced files
+ - Second list contains paths of files that failed to sync.
+ Failures can occur due to missing content in the response or errors during local file writing.
+
+ Raises:
+ HumanloopRuntimeError: If there's an error communicating with the API
+ """
+ successful_files = []
+ failed_files = []
+ page = 1
+
+ logger.debug(f"Fetching files from directory: {path or '(root)'} in environment: {environment or '(default)'}")
+
+ while True:
+ try:
+ logger.debug(f"`{path}`: Requesting page {page} of files")
+ response = self.client.files.list_files(
+ type=list(self.SERIALIZABLE_FILE_TYPES),
+ page=page,
+ size=100,
+ include_raw_file_content=True,
+ environment=environment,
+ path=path,
+ )
+
+ if len(response.records) == 0:
+ logger.debug(f"Finished reading files for path `{path}`")
+ break
+
+ logger.debug(f"`{path}`: Read page {page} containing {len(response.records)} files")
+
+ # Process each file
+ for file in response.records:
+ # Skip if not a serializable file type
+ if file.type not in self.SERIALIZABLE_FILE_TYPES:
+ logger.warning(f"Skipping unsupported file type: {file.type}")
+ continue
+
+ file_type: SerializableFileType = typing.cast(
+ SerializableFileType,
+ file.type,
+ )
+
+ # Skip if no raw file content
+ if not getattr(file, "raw_file_content", None) or not file.raw_file_content: # type: ignore [union-attr]
+ logger.warning(f"No content found for {file.type} {file.path}")
+ failed_files.append(file.path)
+ continue
+
+ try:
+ logger.debug(f"Writing {file.type} {file.path} to disk")
+ self._save_serialized_file(
+ serialized_content=file.raw_file_content, # type: ignore [union-attr]
+ file_path=file.path,
+ file_type=file_type,
+ )
+ successful_files.append(file.path)
+ except Exception as e:
+ failed_files.append(file.path)
+ logger.error(f"Failed to save {file.path}: {str(e)}")
+
+ page += 1
+ except Exception as e:
+ formatted_error = format_api_error(e)
+ raise HumanloopRuntimeError(f"Failed to fetch page {page}: {formatted_error}")
+
+ if successful_files:
+ logger.info(f"Successfully pulled {len(successful_files)} files")
+ if failed_files:
+ logger.warning(f"Failed to pull {len(failed_files)} files")
+
+ return successful_files, failed_files
+
+    def pull(self, path: Optional[str] = None, environment: Optional[str] = None) -> Tuple[List[str], List[str]]:
+ """Pull files from Humanloop to local filesystem.
+
+ If the path ends with .prompt or .agent, pulls that specific file.
+ Otherwise, pulls all files under the specified path.
+ If no path is provided, pulls all files from the root.
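+
+        For example (illustrative), pull(path="prompts/marketing.prompt") pulls that
+        single file, while pull(path="prompts") pulls everything under that directory.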
+
+ Args:
+ path: The path to pull from (either a specific file or directory)
+ environment: The environment to pull from
+
+ Returns:
+ Tuple of two lists:
+ - First list contains paths of successfully synced files
+ - Second list contains paths of files that failed to sync (e.g. failed to write to disk or missing raw content)
+
+ Raises:
+ HumanloopRuntimeError: If there's an error communicating with the API
+ """
+ start_time = time.time()
+ normalized_path = self._normalize_path(path) if path else None
+
+ logger.info(
+ f"Starting pull operation: path={normalized_path or '(root)'}, environment={environment or '(default)'}"
+ )
+
+ try:
+            # `path is None` implies `normalized_path is None`; both are checked to satisfy the type checker.
+            if normalized_path is None or path is None:
+ # Pull all files from the root
+ logger.debug("Pulling all files from root")
+ successful_files, failed_files = self._pull_directory(
+ path=None,
+ environment=environment,
+ )
+ else:
+ if self.is_file(path.strip()):
+ logger.debug(f"Pulling file: {normalized_path}")
+ if self._pull_file(path=normalized_path, environment=environment):
+ successful_files = [path]
+ failed_files = []
+ else:
+ successful_files = []
+ failed_files = [path]
+ else:
+ logger.debug(f"Pulling directory: {normalized_path}")
+ successful_files, failed_files = self._pull_directory(normalized_path, environment)
+
+ # Clear the cache at the end of each pull operation
+ self.clear_cache()
+
+ duration_ms = int((time.time() - start_time) * 1000)
+ logger.info(f"Pull completed in {duration_ms}ms: {len(successful_files)} files succeeded")
+
+ return successful_files, failed_files
+        except HumanloopRuntimeError:
+            # Re-raise our own errors as-is to avoid double-wrapping the message
+            raise
+        except Exception as e:
+            raise HumanloopRuntimeError(f"Pull operation failed: {str(e)}") from e
diff --git a/src/humanloop/tools/client.py b/src/humanloop/tools/client.py
index ea6b14a2..f58fc3d8 100644
--- a/src/humanloop/tools/client.py
+++ b/src/humanloop/tools/client.py
@@ -10,7 +10,7 @@
from ..types.tool_call_response import ToolCallResponse
from ..types.create_tool_log_response import CreateToolLogResponse
from ..types.log_response import LogResponse
-from ..types.project_sort_by import ProjectSortBy
+from ..types.file_sort_by import FileSortBy
from ..types.sort_order import SortOrder
from ..core.pagination import SyncPager
from ..types.tool_response import ToolResponse
@@ -479,7 +479,7 @@ def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> SyncPager[ToolResponse]:
@@ -500,7 +500,7 @@ def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Tools by
order : typing.Optional[SortOrder]
@@ -1666,7 +1666,7 @@ async def list(
size: typing.Optional[int] = None,
name: typing.Optional[str] = None,
user_filter: typing.Optional[str] = None,
- sort_by: typing.Optional[ProjectSortBy] = None,
+ sort_by: typing.Optional[FileSortBy] = None,
order: typing.Optional[SortOrder] = None,
request_options: typing.Optional[RequestOptions] = None,
) -> AsyncPager[ToolResponse]:
@@ -1687,7 +1687,7 @@ async def list(
user_filter : typing.Optional[str]
Case-insensitive filter for users in the Tool. This filter matches against both email address and name of users.
- sort_by : typing.Optional[ProjectSortBy]
+ sort_by : typing.Optional[FileSortBy]
Field to sort Tools by
order : typing.Optional[SortOrder]
diff --git a/src/humanloop/types/__init__.py b/src/humanloop/types/__init__.py
index 7c1d30f5..7d863134 100644
--- a/src/humanloop/types/__init__.py
+++ b/src/humanloop/types/__init__.py
@@ -94,6 +94,7 @@
from .file_id import FileId
from .file_path import FilePath
from .file_request import FileRequest
+from .file_sort_by import FileSortBy
from .file_type import FileType
from .files_tool_type import FilesToolType
from .flow_kernel_request import FlowKernelRequest
@@ -155,7 +156,6 @@
from .populate_template_response_reasoning_effort import PopulateTemplateResponseReasoningEffort
from .populate_template_response_stop import PopulateTemplateResponseStop
from .populate_template_response_template import PopulateTemplateResponseTemplate
-from .project_sort_by import ProjectSortBy
from .prompt_call_log_response import PromptCallLogResponse
from .prompt_call_response import PromptCallResponse
from .prompt_call_response_tool_choice import PromptCallResponseToolChoice
@@ -299,6 +299,7 @@
"FileId",
"FilePath",
"FileRequest",
+ "FileSortBy",
"FileType",
"FilesToolType",
"FlowKernelRequest",
@@ -356,7 +357,6 @@
"PopulateTemplateResponseReasoningEffort",
"PopulateTemplateResponseStop",
"PopulateTemplateResponseTemplate",
- "ProjectSortBy",
"PromptCallLogResponse",
"PromptCallResponse",
"PromptCallResponseToolChoice",
diff --git a/src/humanloop/types/agent_continue_response.py b/src/humanloop/types/agent_continue_response.py
deleted file mode 100644
index 0bbd7858..00000000
--- a/src/humanloop/types/agent_continue_response.py
+++ /dev/null
@@ -1,224 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_response import AgentResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_response import PromptResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-from .agent_log_response import AgentLogResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .flow_log_response import FlowLogResponse
-from .prompt_log_response import PromptLogResponse
-from .tool_log_response import ToolLogResponse
-import typing
-from .chat_message import ChatMessage
-import pydantic
-from .agent_continue_response_tool_choice import AgentContinueResponseToolChoice
-import datetime as dt
-from .log_status import LogStatus
-from .log_response import LogResponse
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-
-
-class AgentContinueResponse(UncheckedBaseModel):
- """
- Response model for continuing an Agent call.
- """
-
- output_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
- """
- The message returned by the provider.
- """
-
- prompt_tokens: typing.Optional[int] = pydantic.Field(default=None)
- """
- Number of tokens in the prompt used to generate the output.
- """
-
- reasoning_tokens: typing.Optional[int] = pydantic.Field(default=None)
- """
- Number of reasoning tokens used to generate the output.
- """
-
- output_tokens: typing.Optional[int] = pydantic.Field(default=None)
- """
- Number of tokens in the output generated by the model.
- """
-
- prompt_cost: typing.Optional[float] = pydantic.Field(default=None)
- """
- Cost in dollars associated to the tokens in the prompt.
- """
-
- output_cost: typing.Optional[float] = pydantic.Field(default=None)
- """
- Cost in dollars associated to the tokens in the output.
- """
-
- finish_reason: typing.Optional[str] = pydantic.Field(default=None)
- """
- Reason the generation finished.
- """
-
- messages: typing.Optional[typing.List[ChatMessage]] = pydantic.Field(default=None)
- """
- The messages passed to the to provider chat endpoint.
- """
-
- tool_choice: typing.Optional[AgentContinueResponseToolChoice] = pydantic.Field(default=None)
- """
- Controls how the model uses tools. The following options are supported:
- - `'none'` means the model will not call any tool and instead generates a message; this is the default when no tools are provided as part of the Prompt.
- - `'auto'` means the model can decide to call one or more of the provided tools; this is the default when tools are provided as part of the Prompt.
- - `'required'` means the model must call one or more of the provided tools.
- - `{'type': 'function', 'function': {name': }}` forces the model to use the named function.
- """
-
- agent: AgentResponse = pydantic.Field()
- """
- Agent that generated the Log.
- """
-
- start_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- When the logged event started.
- """
-
- end_time: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- When the logged event ended.
- """
-
- output: typing.Optional[str] = pydantic.Field(default=None)
- """
- Generated output from your model for the provided inputs. Can be `None` if logging an error, or if creating a parent Log with the intention to populate it later.
- """
-
- created_at: typing.Optional[dt.datetime] = pydantic.Field(default=None)
- """
- User defined timestamp for when the log was created.
- """
-
- error: typing.Optional[str] = pydantic.Field(default=None)
- """
- Error message if the log is an error.
- """
-
- provider_latency: typing.Optional[float] = pydantic.Field(default=None)
- """
- Duration of the logged event in seconds.
- """
-
- stdout: typing.Optional[str] = pydantic.Field(default=None)
- """
- Captured log and debug statements.
- """
-
- provider_request: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Raw request sent to provider.
- """
-
- provider_response: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Raw response received the provider.
- """
-
- inputs: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- The inputs passed to the prompt template.
- """
-
- source: typing.Optional[str] = pydantic.Field(default=None)
- """
- Identifies where the model was called from.
- """
-
- metadata: typing.Optional[typing.Dict[str, typing.Optional[typing.Any]]] = pydantic.Field(default=None)
- """
- Any additional metadata to record.
- """
-
- log_status: typing.Optional[LogStatus] = pydantic.Field(default=None)
- """
- Status of the Agent Log. If `incomplete`, the Agent turn was suspended due to a tool call and can be continued by calling /agents/continue with responses to the Agent's last message (which should contain tool calls). See the `previous_agent_message` field for easy access to the Agent's last message.
- """
-
- source_datapoint_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Unique identifier for the Datapoint that this Log is derived from. This can be used by Humanloop to associate Logs to Evaluations. If provided, Humanloop will automatically associate this Log to Evaluations that require a Log for this Datapoint-Version pair.
- """
-
- trace_parent_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- The ID of the parent Log to nest this Log under in a Trace.
- """
-
- batches: typing.Optional[typing.List[str]] = pydantic.Field(default=None)
- """
- Array of Batch IDs that this Log is part of. Batches are used to group Logs together for offline Evaluations
- """
-
- user: typing.Optional[str] = pydantic.Field(default=None)
- """
- End-user ID related to the Log.
- """
-
- environment: typing.Optional[str] = pydantic.Field(default=None)
- """
- The name of the Environment the Log is associated to.
- """
-
- save: typing.Optional[bool] = pydantic.Field(default=None)
- """
- Whether the request/response payloads will be stored on Humanloop.
- """
-
- log_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- This will identify a Log. If you don't provide a Log ID, Humanloop will generate one for you.
- """
-
- id: str = pydantic.Field()
- """
- Unique identifier for the Log.
- """
-
- evaluator_logs: typing.List[EvaluatorLogResponse] = pydantic.Field()
- """
- List of Evaluator Logs associated with the Log. These contain Evaluator judgments on the Log.
- """
-
- trace_flow_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Identifier for the Flow that the Trace belongs to.
- """
-
- trace_id: typing.Optional[str] = pydantic.Field(default=None)
- """
- Identifier for the Trace that the Log belongs to.
- """
-
- trace_children: typing.Optional[typing.List[LogResponse]] = pydantic.Field(default=None)
- """
- Logs nested under this Log in the Trace.
- """
-
- previous_agent_message: typing.Optional[ChatMessage] = pydantic.Field(default=None)
- """
- The Agent's last message, which should contain tool calls. Only populated if the Log is incomplete due to a suspended Agent turn with tool calls. This is useful for continuing the Agent call by calling /agents/continue.
- """
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_response_tool_choice.py b/src/humanloop/types/agent_continue_response_tool_choice.py
deleted file mode 100644
index 20f3fb75..00000000
--- a/src/humanloop/types/agent_continue_response_tool_choice.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .tool_choice import ToolChoice
-
-AgentContinueResponseToolChoice = typing.Union[
- typing.Literal["none"], typing.Literal["auto"], typing.Literal["required"], ToolChoice
-]
diff --git a/src/humanloop/types/agent_continue_stream_response.py b/src/humanloop/types/agent_continue_stream_response.py
deleted file mode 100644
index ff7a0fac..00000000
--- a/src/humanloop/types/agent_continue_stream_response.py
+++ /dev/null
@@ -1,44 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-from ..core.unchecked_base_model import UncheckedBaseModel
-from .agent_linked_file_response import AgentLinkedFileResponse
-from .agent_log_response import AgentLogResponse
-from .agent_response import AgentResponse
-from .evaluator_log_response import EvaluatorLogResponse
-from .evaluator_response import EvaluatorResponse
-from .flow_log_response import FlowLogResponse
-from .flow_response import FlowResponse
-from .monitoring_evaluator_response import MonitoringEvaluatorResponse
-from .prompt_log_response import PromptLogResponse
-from .prompt_response import PromptResponse
-from .tool_log_response import ToolLogResponse
-from .tool_response import ToolResponse
-from .version_deployment_response import VersionDeploymentResponse
-from .version_id_response import VersionIdResponse
-import typing
-from .agent_continue_stream_response_payload import AgentContinueStreamResponsePayload
-from .event_type import EventType
-import datetime as dt
-from ..core.pydantic_utilities import IS_PYDANTIC_V2
-import pydantic
-
-
-class AgentContinueStreamResponse(UncheckedBaseModel):
- """
- Response model for continuing an Agent call in streaming mode.
- """
-
- log_id: str
- message: str
- payload: typing.Optional[AgentContinueStreamResponsePayload] = None
- type: EventType
- created_at: dt.datetime
-
- if IS_PYDANTIC_V2:
- model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
- else:
-
- class Config:
- frozen = True
- smart_union = True
- extra = pydantic.Extra.allow
diff --git a/src/humanloop/types/agent_continue_stream_response_payload.py b/src/humanloop/types/agent_continue_stream_response_payload.py
deleted file mode 100644
index 0e5f8a58..00000000
--- a/src/humanloop/types/agent_continue_stream_response_payload.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-from .log_stream_response import LogStreamResponse
-from .log_response import LogResponse
-from .tool_call import ToolCall
-
-AgentContinueStreamResponsePayload = typing.Union[LogStreamResponse, LogResponse, ToolCall]
diff --git a/src/humanloop/types/agent_response.py b/src/humanloop/types/agent_response.py
index e58aaeba..fa72a361 100644
--- a/src/humanloop/types/agent_response.py
+++ b/src/humanloop/types/agent_response.py
@@ -237,6 +237,11 @@ class AgentResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Agent Version.
"""
+ raw_file_content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The raw content of the Agent. Corresponds to the .agent file.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/humanloop/types/file_sort_by.py b/src/humanloop/types/file_sort_by.py
new file mode 100644
index 00000000..b3135c3b
--- /dev/null
+++ b/src/humanloop/types/file_sort_by.py
@@ -0,0 +1,5 @@
+# This file was auto-generated by Fern from our API Definition.
+
+import typing
+
+FileSortBy = typing.Union[typing.Literal["created_at", "updated_at", "name"], typing.Any]
diff --git a/src/humanloop/types/populate_template_response.py b/src/humanloop/types/populate_template_response.py
index efcd1d0c..4dad70b1 100644
--- a/src/humanloop/types/populate_template_response.py
+++ b/src/humanloop/types/populate_template_response.py
@@ -231,6 +231,11 @@ class PopulateTemplateResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Prompt Version.
"""
+ raw_file_content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The raw content of the Prompt. Corresponds to the .prompt file.
+ """
+
populated_template: typing.Optional[PopulateTemplateResponsePopulatedTemplate] = pydantic.Field(default=None)
"""
The template populated with the input values you provided in the request. Returns None if no template exists.
diff --git a/src/humanloop/types/project_sort_by.py b/src/humanloop/types/project_sort_by.py
deleted file mode 100644
index b8265b56..00000000
--- a/src/humanloop/types/project_sort_by.py
+++ /dev/null
@@ -1,5 +0,0 @@
-# This file was auto-generated by Fern from our API Definition.
-
-import typing
-
-ProjectSortBy = typing.Union[typing.Literal["created_at", "updated_at", "name"], typing.Any]
diff --git a/src/humanloop/types/prompt_response.py b/src/humanloop/types/prompt_response.py
index 5d6ff870..59f00d1c 100644
--- a/src/humanloop/types/prompt_response.py
+++ b/src/humanloop/types/prompt_response.py
@@ -223,6 +223,11 @@ class PromptResponse(UncheckedBaseModel):
Aggregation of Evaluator results for the Prompt Version.
"""
+ raw_file_content: typing.Optional[str] = pydantic.Field(default=None)
+ """
+ The raw content of the Prompt. Corresponds to the .prompt file.
+ """
+
if IS_PYDANTIC_V2:
model_config: typing.ClassVar[pydantic.ConfigDict] = pydantic.ConfigDict(extra="allow", frozen=True) # type: ignore # Pydantic v2
else:
diff --git a/src/humanloop/types/version_id_response_version.py b/src/humanloop/types/version_id_response_version.py
index 1b74199f..b1cbd45d 100644
--- a/src/humanloop/types/version_id_response_version.py
+++ b/src/humanloop/types/version_id_response_version.py
@@ -3,6 +3,7 @@
from __future__ import annotations
import typing
from .dataset_response import DatasetResponse
+import typing
if typing.TYPE_CHECKING:
from .prompt_response import PromptResponse
@@ -11,10 +12,5 @@
from .flow_response import FlowResponse
from .agent_response import AgentResponse
VersionIdResponseVersion = typing.Union[
- "PromptResponse",
- "ToolResponse",
- DatasetResponse,
- "EvaluatorResponse",
- "FlowResponse",
- "AgentResponse",
+ "PromptResponse", "ToolResponse", DatasetResponse, "EvaluatorResponse", "FlowResponse", "AgentResponse"
]
diff --git a/src/humanloop/types/version_reference_response.py b/src/humanloop/types/version_reference_response.py
index a6a7783c..399361c8 100644
--- a/src/humanloop/types/version_reference_response.py
+++ b/src/humanloop/types/version_reference_response.py
@@ -2,6 +2,7 @@
from __future__ import annotations
import typing
+import typing
if typing.TYPE_CHECKING:
from .version_deployment_response import VersionDeploymentResponse
diff --git a/tests/custom/README.md b/tests/custom/README.md
new file mode 100644
index 00000000..14ff7ed4
--- /dev/null
+++ b/tests/custom/README.md
@@ -0,0 +1,19 @@
+# Custom Tests Directory
+
+This directory contains custom tests for the Humanloop Python SDK. While the main SDK is auto-generated using [Fern](https://buildwithfern.com/), this directory allows us to add our own test implementations that won't be overwritten during regeneration.
+
+## Why Custom Tests?
+
+- **Preservation**: Tests in this directory won't be overwritten when regenerating the SDK
+- **Custom Implementation**: Allows testing of our own implementations beyond the auto-generated code
+- **Integration**: Enables testing of how our custom code works with the auto-generated SDK
+
+## Running Tests
+
+```bash
+# Run all custom tests
+pytest tests/custom/
+
+# Run a specific test file
+pytest tests/custom/sync/test_sync_client.py
+```
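+
+## Integration Tests
+
+Integration tests under `tests/custom/integration/` run against the live Humanloop API. Credentials are read from the environment or a `.env` file; the fixtures fail fast if `HUMANLOOP_API_KEY` is missing, and some tests also require `OPENAI_API_KEY`. A typical invocation (illustrative):
+
+```bash
+export HUMANLOOP_API_KEY=...   # required
+export OPENAI_API_KEY=...      # required by some tests
+pytest tests/custom/integration/
+```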
diff --git a/tests/otel/__init__.py b/tests/custom/__init__.py
similarity index 100%
rename from tests/otel/__init__.py
rename to tests/custom/__init__.py
diff --git a/tests/assets/exact_match.py b/tests/custom/assets/exact_match.py
similarity index 100%
rename from tests/assets/exact_match.py
rename to tests/custom/assets/exact_match.py
diff --git a/tests/assets/levenshtein.py b/tests/custom/assets/levenshtein.py
similarity index 100%
rename from tests/assets/levenshtein.py
rename to tests/custom/assets/levenshtein.py
diff --git a/tests/conftest.py b/tests/custom/conftest.py
similarity index 58%
rename from tests/conftest.py
rename to tests/custom/conftest.py
index 80e3b336..7667dedf 100644
--- a/tests/conftest.py
+++ b/tests/custom/conftest.py
@@ -1,15 +1,9 @@
-from dataclasses import asdict, dataclass
+from typing import Generator
import os
-import random
-import string
-import time
-from typing import Callable, Generator
-import typing
+from dotenv import load_dotenv
from unittest.mock import MagicMock
-from dotenv import load_dotenv
import pytest
-from humanloop.base_client import BaseHumanloop
from humanloop.client import Humanloop
from humanloop.otel.exporter import HumanloopSpanExporter
from humanloop.otel.processor import HumanloopSpanProcessor
@@ -25,9 +19,7 @@
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter
from opentelemetry.trace import Tracer
-
-if typing.TYPE_CHECKING:
- from humanloop.client import BaseHumanloop
+from tests.custom.types import GetHumanloopClientFn
@pytest.fixture(scope="function")
@@ -87,10 +79,24 @@ def opentelemetry_test_configuration(
instrumentor.uninstrument()
+@pytest.fixture(scope="session")
+def get_humanloop_client() -> GetHumanloopClientFn:
+ load_dotenv()
+ if not os.getenv("HUMANLOOP_API_KEY"):
+ pytest.fail("HUMANLOOP_API_KEY is not set for integration tests")
+
+ def _get_humanloop_client(use_local_files: bool = False) -> Humanloop:
+ return Humanloop(
+ api_key=os.getenv("HUMANLOOP_API_KEY"),
+ use_local_files=use_local_files,
+ )
+
+ return _get_humanloop_client
+
+
@pytest.fixture(scope="function")
def opentelemetry_hl_test_configuration(
opentelemetry_test_provider: TracerProvider,
- humanloop_client: BaseHumanloop,
) -> Generator[tuple[Tracer, InMemorySpanExporter], None, None]:
"""Configure OTel backend with HumanloopSpanProcessor.
@@ -162,117 +168,3 @@ def call_llm_messages() -> list[ChatCompletionMessageParam]:
"content": "Bonjour!",
},
]
-
-
-@dataclass
-class APIKeys:
- openai: str
- humanloop: str
-
-
-@pytest.fixture(scope="session")
-def api_keys() -> APIKeys:
- openai_key = os.getenv("OPENAI_API_KEY")
- humanloop_key = os.getenv("HUMANLOOP_API_KEY")
- for key_name, key_value in [
- ("OPENAI_API_KEY", openai_key),
- ("HUMANLOOP_API_KEY", humanloop_key),
- ]:
- if key_value is None:
- raise ValueError(f"{key_name} is not set in .env file")
- api_keys = APIKeys(
- openai=openai_key, # type: ignore [arg-type]
- humanloop=humanloop_key, # type: ignore [arg-type]
- )
- for key, value in asdict(api_keys).items():
- if value is None:
- raise ValueError(f"{key.upper()} key is not set in .env file")
- return api_keys
-
-
-@pytest.fixture(scope="session")
-def humanloop_client(api_keys: APIKeys) -> Humanloop:
- return Humanloop(api_key=api_keys.humanloop)
-
-
-@pytest.fixture(scope="session", autouse=True)
-def load_env():
- load_dotenv()
-
-
-def directory_cleanup(directory_id: str, humanloop_client: Humanloop):
- response = humanloop_client.directories.get(directory_id)
- for file in response.files:
- file_id = file.id
- if file.type == "prompt":
- client = humanloop_client.prompts # type: ignore [assignment]
- elif file.type == "tool":
- client = humanloop_client.tools # type: ignore [assignment]
- elif file.type == "dataset":
- client = humanloop_client.datasets # type: ignore [assignment]
- elif file.type == "evaluator":
- client = humanloop_client.evaluators # type: ignore [assignment]
- elif file.type == "flow":
- client = humanloop_client.flows # type: ignore [assignment]
- else:
- raise NotImplementedError(f"Unknown HL file type {file.type}")
- client.delete(file_id)
-
- for subdirectory in response.subdirectories:
- directory_cleanup(
- directory_id=subdirectory.id,
- humanloop_client=humanloop_client,
- )
-
- humanloop_client.directories.delete(id=response.id)
-
-
-@dataclass
-class DirectoryIdentifiers:
- path: str
- id: str
-
-
-@pytest.fixture()
-def test_directory(
- humanloop_client: Humanloop,
-) -> Generator[DirectoryIdentifiers, None, None]:
- # Generate a random alphanumeric directory name to avoid conflicts
- def get_random_string(length: int = 16) -> str:
- return "".join([random.choice(string.ascii_letters + "0123456789") for _ in range(length)])
-
- directory_path = "SDK_integ_test_" + get_random_string()
- response = humanloop_client.directories.create(path=directory_path)
- assert response.path == directory_path
- try:
- yield DirectoryIdentifiers(
- path=response.path,
- id=response.id,
- )
- finally:
- time.sleep(1)
- directory_cleanup(response.id, humanloop_client)
-
-
-@pytest.fixture()
-def get_test_path(test_directory: DirectoryIdentifiers) -> Callable[[str], str]:
- def generate_path(name: str) -> str:
- return f"{test_directory.path}/{name}"
-
- return generate_path
-
-
-# @pytest.fixture(scope="session", autouse=True)
-# def cleanup_test_dirs(humanloop_client: Humanloop):
-# def _cleanup_all_test_dirs():
-# dirs = humanloop_client.directories.list()
-# for dir in dirs:
-# if dir.path.startswith("SDK_integ_test_"):
-# directory_cleanup(
-# directory_id=dir.id,
-# humanloop_client=humanloop_client,
-# )
-
-# _cleanup_all_test_dirs()
-# yield
-# _cleanup_all_test_dirs()
diff --git a/tests/custom/integration/__init__.py b/tests/custom/integration/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/custom/integration/conftest.py b/tests/custom/integration/conftest.py
new file mode 100644
index 00000000..f918c48c
--- /dev/null
+++ b/tests/custom/integration/conftest.py
@@ -0,0 +1,259 @@
+from contextlib import contextmanager, redirect_stdout
+from dataclasses import dataclass
+import io
+import os
+import time
+import uuid
+from typing import Any, Callable, ContextManager, Generator, List, Union
+
+import pytest
+import dotenv
+from click.testing import CliRunner
+
+from humanloop import AgentResponse, PromptResponse
+from tests.custom.types import GetHumanloopClientFn, SyncableFile
+
+
+@dataclass
+class ResourceIdentifiers:
+ file_id: str
+ file_path: str
+
+
+@pytest.fixture()
+def capture_stdout() -> Callable[[], ContextManager[io.StringIO]]:
+    @contextmanager
+    def _context_manager() -> Generator[io.StringIO, None, None]:
+        f = io.StringIO()
+        with redirect_stdout(f):
+            yield f
+
+    return _context_manager
+
+
+@pytest.fixture(scope="session")
+def openai_key() -> str:
+ dotenv.load_dotenv()
+ if not os.getenv("OPENAI_API_KEY"):
+ pytest.fail("OPENAI_API_KEY is not set for integration tests")
+ return os.getenv("OPENAI_API_KEY") # type: ignore [return-value]
+
+
+@pytest.fixture(scope="function")
+def sdk_test_dir(get_humanloop_client: GetHumanloopClientFn) -> Generator[str, None, None]:
+ humanloop_client = get_humanloop_client()
+
+ def cleanup_directory(directory_id: str):
+ directory_response = humanloop_client.directories.get(id=directory_id)
+ for subdirectory in directory_response.subdirectories:
+ cleanup_directory(subdirectory.id)
+ for file in directory_response.files:
+ match file.type:
+ case "agent":
+ humanloop_client.agents.delete(id=file.id)
+ case "prompt":
+ humanloop_client.prompts.delete(id=file.id)
+ case "dataset":
+ humanloop_client.datasets.delete(id=file.id)
+ case "evaluator":
+ humanloop_client.evaluators.delete(id=file.id)
+ case "flow":
+ humanloop_client.flows.delete(id=file.id)
+ case "tool":
+ humanloop_client.tools.delete(id=file.id)
+ case _:
+ raise ValueError(f"Unknown file type: {file.type}")
+ humanloop_client.directories.delete(id=directory_response.id)
+
+ path = f"SDK_INTEGRATION_TEST_{uuid.uuid4()}"
+ response = None
+ try:
+ response = humanloop_client.directories.create(path=path)
+ yield response.path
+ except Exception as e:
+ pytest.fail(f"Failed to create directory {path}: {e}")
+ finally:
+ if response:
+ time.sleep(5)
+ cleanup_directory(response.id)
+
+
+@pytest.fixture(scope="function")
+def test_prompt_config() -> dict[str, Any]:
+ return {
+ "provider": "openai",
+ "model": "gpt-4o-mini",
+ "temperature": 0.5,
+ "template": [
+ {
+ "role": "system",
+ "content": "You are a helpful assistant. You must answer the user's question truthfully and at the level of a 5th grader.",
+ },
+ {
+ "role": "user",
+ "content": "{{question}}",
+ },
+ ],
+ }
+
+
+@pytest.fixture(scope="function")
+def eval_dataset(
+ get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str
+) -> Generator[ResourceIdentifiers, None, None]:
+ humanloop_client = get_humanloop_client()
+ dataset_path = f"{sdk_test_dir}/eval_dataset"
+    try:
+        response = humanloop_client.datasets.upsert(
+            path=dataset_path,
+            datapoints=[
+                {
+                    "inputs": {
+                        "question": "What is the capital of France?",
+                    },
+                },
+                {
+                    "inputs": {
+                        "question": "What is the capital of Germany?",
+                    },
+                },
+                {
+                    "inputs": {
+                        "question": "What is 2+2?",
+                    },
+                },
+            ],
+        )
+    except Exception as e:
+        pytest.fail(f"Failed to create dataset {dataset_path}: {e}")
+    try:
+        yield ResourceIdentifiers(file_id=response.id, file_path=response.path)
+    finally:
+        humanloop_client.datasets.delete(id=response.id)
+
+
+@pytest.fixture(scope="function")
+def eval_prompt(
+ get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str, openai_key: str, test_prompt_config: dict[str, Any]
+) -> Generator[ResourceIdentifiers, None, None]:
+ humanloop_client = get_humanloop_client()
+ prompt_path = f"{sdk_test_dir}/eval_prompt"
+ try:
+ response = humanloop_client.prompts.upsert(
+ path=prompt_path,
+ **test_prompt_config,
+ )
+    except Exception as e:
+        pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
+    try:
+        yield ResourceIdentifiers(file_id=response.id, file_path=response.path)
+    finally:
+        humanloop_client.prompts.delete(id=response.id)
+
+
+@pytest.fixture(scope="function")
+def output_not_null_evaluator(
+ get_humanloop_client: GetHumanloopClientFn, sdk_test_dir: str
+) -> Generator[ResourceIdentifiers, None, None]:
+ humanloop_client = get_humanloop_client()
+ evaluator_path = f"{sdk_test_dir}/output_not_null_evaluator"
+ try:
+ response = humanloop_client.evaluators.upsert(
+ path=evaluator_path,
+ spec={
+ "arguments_type": "target_required",
+ "return_type": "boolean",
+ "code": """
+def output_not_null(log: dict) -> bool:
+ return log["output"] is not None
+ """,
+ "evaluator_type": "python",
+ },
+ )
+    except Exception as e:
+        pytest.fail(f"Failed to create evaluator {evaluator_path}: {e}")
+    try:
+        yield ResourceIdentifiers(file_id=response.id, file_path=response.path)
+    finally:
+        humanloop_client.evaluators.delete(id=response.id)
+
+
+@pytest.fixture(scope="function")
+def id_for_staging_environment(get_humanloop_client: GetHumanloopClientFn, eval_prompt: ResourceIdentifiers) -> str:
+ humanloop_client = get_humanloop_client()
+ response = humanloop_client.prompts.list_environments(id=eval_prompt.file_id)
+ for environment in response:
+ if environment.name == "staging":
+ return environment.id
+ pytest.fail("Staging environment not found")
+
+
+@pytest.fixture
+def syncable_files_fixture(
+ get_humanloop_client: GetHumanloopClientFn,
+ sdk_test_dir: str,
+) -> Generator[list[SyncableFile], None, None]:
+ """Creates a predefined structure of files in Humanloop for testing sync."""
+ files: List[SyncableFile] = [
+ SyncableFile(
+ path="prompts/gpt-4",
+ type="prompt",
+ model="gpt-4",
+ ),
+ SyncableFile(
+ path="prompts/gpt-4o",
+ type="prompt",
+ model="gpt-4o",
+ ),
+ SyncableFile(
+ path="prompts/nested/complex/gpt-4o",
+ type="prompt",
+ model="gpt-4o",
+ ),
+ SyncableFile(
+ path="agents/gpt-4",
+ type="agent",
+ model="gpt-4",
+ ),
+ SyncableFile(
+ path="agents/gpt-4o",
+ type="agent",
+ model="gpt-4o",
+ ),
+ ]
+
+ humanloop_client = get_humanloop_client()
+ created_files = []
+ for file in files:
+ full_path = f"{sdk_test_dir}/{file.path}"
+ response: Union[AgentResponse, PromptResponse]
+ if file.type == "prompt":
+ response = humanloop_client.prompts.upsert(
+ path=full_path,
+ model=file.model,
+ )
+ elif file.type == "agent":
+ response = humanloop_client.agents.upsert(
+ path=full_path,
+ model=file.model,
+ )
+ created_files.append(
+ SyncableFile(
+ path=full_path, type=file.type, model=file.model, id=response.id, version_id=response.version_id
+ )
+ )
+
+ yield created_files
+
+
+@pytest.fixture
+def cli_runner() -> CliRunner:
+ """GIVEN a CLI runner
+ THEN it should be configured to catch exceptions
+ """
+ return CliRunner(mix_stderr=False)
+
+
+@pytest.fixture
+def no_humanloop_api_key_in_env(monkeypatch: pytest.MonkeyPatch):
+ """Fixture that removes HUMANLOOP_API_KEY from environment variables.
+
+ Use this fixture in tests that verify behavior when no API key is available
+ in the environment (but could still be loaded from .env files).
+ """
+ # Remove API key from environment
+ monkeypatch.delenv("HUMANLOOP_API_KEY", raising=False)
+ yield
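The tests that follow consume a session-scoped get_humanloop_client factory fixture whose definition sits above this hunk. A plausible shape for it, assuming the Humanloop constructor gained a use_local_files flag in this change (as the GetHumanloopClientFn protocol below suggests):

    import os

    import dotenv
    import pytest
    from humanloop.client import Humanloop
    from tests.custom.types import GetHumanloopClientFn

    @pytest.fixture(scope="session")
    def get_humanloop_client() -> GetHumanloopClientFn:
        dotenv.load_dotenv()
        api_key = os.getenv("HUMANLOOP_API_KEY")
        if not api_key:
            pytest.fail("HUMANLOOP_API_KEY is not set for integration tests")

        def _factory(use_local_files: bool = False) -> Humanloop:
            # use_local_files is assumed to be a constructor flag added in this change
            return Humanloop(api_key=api_key, use_local_files=use_local_files)

        return _factory
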
diff --git a/tests/integration/test_decorators.py b/tests/custom/integration/test_decorators.py
similarity index 56%
rename from tests/integration/test_decorators.py
rename to tests/custom/integration/test_decorators.py
index 218453a6..15057ba2 100644
--- a/tests/integration/test_decorators.py
+++ b/tests/custom/integration/test_decorators.py
@@ -2,27 +2,27 @@
from typing import Any
from openai import OpenAI
-from humanloop.client import Humanloop
-from humanloop.types.chat_message import ChatMessage
+from tests.custom.types import GetHumanloopClientFn
def test_prompt_decorator(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
sdk_test_dir: str,
test_prompt_config: dict[str, Any],
openai_key: str,
):
try:
+ humanloop_client = get_humanloop_client()
prompt_path = f"{sdk_test_dir}/test_prompt"
- prompt_response = humanloop_test_client.prompts.upsert(
+ prompt_response = humanloop_client.prompts.upsert(
path=prompt_path,
**test_prompt_config,
)
- prompt_versions_response = humanloop_test_client.prompts.list_versions(id=prompt_response.id)
+ prompt_versions_response = humanloop_client.prompts.list_versions(id=prompt_response.id)
assert len(prompt_versions_response.records) == 1
- @humanloop_test_client.prompt(path=prompt_path)
+ @humanloop_client.prompt(path=prompt_path)
def my_prompt(question: str) -> str:
openai_client = OpenAI(api_key=openai_key)
@@ -37,26 +37,27 @@ def my_prompt(question: str) -> str:
assert "paris" in my_prompt("What is the capital of the France?").lower()
time.sleep(5)
- prompt_versions_response = humanloop_test_client.prompts.list_versions(id=prompt_response.id)
+ prompt_versions_response = humanloop_client.prompts.list_versions(id=prompt_response.id)
assert len(prompt_versions_response.records) == 2
- logs_response = humanloop_test_client.logs.list(file_id=prompt_response.id, page=1, size=50)
+ logs_response = humanloop_client.logs.list(file_id=prompt_response.id, page=1, size=50)
assert logs_response.items is not None and len(logs_response.items) == 1
finally:
- humanloop_test_client.prompts.delete(id=prompt_response.id)
+ humanloop_client.prompts.delete(id=prompt_response.id)
def test_call_prompt_in_flow_decorator(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
sdk_test_dir: str,
openai_key: str,
):
try:
+ humanloop_client = get_humanloop_client()
- @humanloop_test_client.flow(path=f"{sdk_test_dir}/test_flow")
+ @humanloop_client.flow(path=f"{sdk_test_dir}/test_flow")
def my_flow(question: str) -> str:
- response = humanloop_test_client.prompts.call(
+ response = humanloop_client.prompts.call(
path=f"{sdk_test_dir}/test_prompt",
prompt={
"provider": "openai",
@@ -72,34 +73,35 @@ def my_flow(question: str) -> str:
assert "paris" in my_flow("What is the capital of the France?").lower()
time.sleep(5)
- prompt_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_prompt")
+ prompt_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_prompt")
assert prompt_response is not None
- prompt_logs_response = humanloop_test_client.logs.list(file_id=prompt_response.id, page=1, size=50)
+ prompt_logs_response = humanloop_client.logs.list(file_id=prompt_response.id, page=1, size=50)
assert prompt_logs_response.items is not None and len(prompt_logs_response.items) == 1
prompt_log = prompt_logs_response.items[0]
- flow_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow")
+ flow_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow")
assert flow_response is not None
- flow_logs_response = humanloop_test_client.logs.list(file_id=flow_response.id, page=1, size=50)
+ flow_logs_response = humanloop_client.logs.list(file_id=flow_response.id, page=1, size=50)
assert flow_logs_response.items is not None and len(flow_logs_response.items) == 1
flow_log = flow_logs_response.items[0]
assert prompt_log.trace_parent_id == flow_log.id
finally:
- flow_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow")
+ flow_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow")
if flow_response is not None:
- humanloop_test_client.flows.delete(id=flow_response.id)
- prompt_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_prompt")
+ humanloop_client.flows.delete(id=flow_response.id)
+ prompt_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_prompt")
if prompt_response is not None:
- humanloop_test_client.prompts.delete(id=prompt_response.id)
+ humanloop_client.prompts.delete(id=prompt_response.id)
def test_flow_decorator_logs_exceptions(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
sdk_test_dir: str,
):
try:
+ humanloop_client = get_humanloop_client()
- @humanloop_test_client.flow(path=f"{sdk_test_dir}/test_flow_log_error")
+ @humanloop_client.flow(path=f"{sdk_test_dir}/test_flow_log_error")
def my_flow(question: str) -> str:
raise ValueError("This is a test exception")
@@ -107,27 +109,28 @@ def my_flow(question: str) -> str:
time.sleep(5)
- flow_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow_log_error")
+ flow_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow_log_error")
assert flow_response is not None
- flow_logs_response = humanloop_test_client.logs.list(file_id=flow_response.id, page=1, size=50)
+ flow_logs_response = humanloop_client.logs.list(file_id=flow_response.id, page=1, size=50)
assert flow_logs_response.items is not None and len(flow_logs_response.items) == 1
flow_log = flow_logs_response.items[0]
assert flow_log.error is not None
assert flow_log.output is None
finally:
- flow_response = humanloop_test_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow_log_error")
+ flow_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow_log_error")
if flow_response is not None:
- humanloop_test_client.flows.delete(id=flow_response.id)
+ humanloop_client.flows.delete(id=flow_response.id)
def test_flow_decorator_populates_output_message(
- humanloop_test_client: Humanloop,
+ get_humanloop_client: GetHumanloopClientFn,
sdk_test_dir: str,
):
try:
+ humanloop_client = get_humanloop_client()
- @humanloop_test_client.flow(path=f"{sdk_test_dir}/test_flow_log_output_message")
+ @humanloop_client.flow(path=f"{sdk_test_dir}/test_flow_log_output_message")
def my_flow(question: str) -> dict[str, Any]:
return {"role": "user", "content": question}
@@ -135,11 +138,9 @@ def my_flow(question: str) -> dict[str, Any]:
time.sleep(5)
- flow_response = humanloop_test_client.files.retrieve_by_path(
- path=f"{sdk_test_dir}/test_flow_log_output_message"
- )
+ flow_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow_log_output_message")
assert flow_response is not None
- flow_logs_response = humanloop_test_client.logs.list(file_id=flow_response.id, page=1, size=50)
+ flow_logs_response = humanloop_client.logs.list(file_id=flow_response.id, page=1, size=50)
assert flow_logs_response.items is not None and len(flow_logs_response.items) == 1
flow_log = flow_logs_response.items[0]
assert flow_log.output_message is not None
@@ -147,8 +148,6 @@ def my_flow(question: str) -> dict[str, Any]:
assert flow_log.error is None
finally:
- flow_response = humanloop_test_client.files.retrieve_by_path(
- path=f"{sdk_test_dir}/test_flow_log_output_message"
- )
+ flow_response = humanloop_client.files.retrieve_by_path(path=f"{sdk_test_dir}/test_flow_log_output_message")
if flow_response is not None:
- humanloop_test_client.flows.delete(id=flow_response.id)
+ humanloop_client.flows.delete(id=flow_response.id)
diff --git a/tests/integration/test_evals.py b/tests/custom/integration/test_evals.py
similarity index 66%
rename from tests/integration/test_evals.py
rename to tests/custom/integration/test_evals.py
index 49bbb6dc..2ec74d93 100644
--- a/tests/integration/test_evals.py
+++ b/tests/custom/integration/test_evals.py
@@ -2,18 +2,19 @@
from typing import Any
import pytest
-from humanloop.client import Humanloop
from humanloop.error import HumanloopRuntimeError
-from tests.integration.conftest import TestIdentifiers
+from tests.custom.integration.conftest import ResourceIdentifiers
+from tests.custom.types import GetHumanloopClientFn
def test_eval_run_works_on_online_files(
- humanloop_test_client: Humanloop,
- output_not_null_evaluator: TestIdentifiers,
- eval_dataset: TestIdentifiers,
- eval_prompt: TestIdentifiers,
+ get_humanloop_client: GetHumanloopClientFn,
+ output_not_null_evaluator: ResourceIdentifiers,
+ eval_dataset: ResourceIdentifiers,
+ eval_prompt: ResourceIdentifiers,
) -> None:
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client = get_humanloop_client()
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"path": eval_prompt.file_path,
@@ -29,29 +30,30 @@ def test_eval_run_works_on_online_files(
],
)
time.sleep(5)
- response = humanloop_test_client.evaluations.list(file_id=eval_prompt.file_id)
+ response = humanloop_client.evaluations.list(file_id=eval_prompt.file_id)
assert response.items and len(response.items) == 1
evaluation_id = response.items[0].id
- run_evaluation_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id) # type: ignore [attr-defined]
+ run_evaluation_response = humanloop_client.evaluations.list_runs_for_evaluation(id=evaluation_id) # type: ignore [attr-defined]
assert run_evaluation_response.runs[0].status == "completed"
def test_eval_run_version_id(
- humanloop_test_client: Humanloop,
- output_not_null_evaluator: TestIdentifiers,
- eval_dataset: TestIdentifiers,
- eval_prompt: TestIdentifiers,
+ get_humanloop_client: GetHumanloopClientFn,
+ output_not_null_evaluator: ResourceIdentifiers,
+ eval_dataset: ResourceIdentifiers,
+ eval_prompt: ResourceIdentifiers,
test_prompt_config: dict[str, Any],
) -> None:
+ humanloop_client = get_humanloop_client()
# GIVEN a prompt where a non-default version is created
new_test_prompt_config = test_prompt_config.copy()
new_test_prompt_config["temperature"] = 1
- new_prompt_version_response = humanloop_test_client.prompts.upsert(
+ new_prompt_version_response = humanloop_client.prompts.upsert(
path=eval_prompt.file_path,
**new_test_prompt_config,
)
# WHEN creating an evaluation using version_id
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"id": new_prompt_version_response.id,
@@ -68,44 +70,45 @@ def test_eval_run_version_id(
],
)
# THEN we evaluate the version created in the test
- evaluations_response = humanloop_test_client.evaluations.list(file_id=new_prompt_version_response.id)
+ evaluations_response = humanloop_client.evaluations.list(file_id=new_prompt_version_response.id)
assert evaluations_response.items and len(evaluations_response.items) == 1
evaluation_id = evaluations_response.items[0].id
- runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
+ runs_response = humanloop_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
assert runs_response.runs[0].status == "completed"
assert (
runs_response.runs[0].version
and runs_response.runs[0].version.version_id == new_prompt_version_response.version_id
)
- list_versions_response = humanloop_test_client.prompts.list_versions(id=new_prompt_version_response.id)
+ list_versions_response = humanloop_client.prompts.list_versions(id=new_prompt_version_response.id)
assert list_versions_response.records and len(list_versions_response.records) == 2
# THEN the version used in evaluation is not the default version
- response = humanloop_test_client.prompts.get(id=new_prompt_version_response.id)
+ response = humanloop_client.prompts.get(id=new_prompt_version_response.id)
assert response.version_id != new_prompt_version_response.version_id
def test_eval_run_environment(
- humanloop_test_client: Humanloop,
- output_not_null_evaluator: TestIdentifiers,
- eval_dataset: TestIdentifiers,
- eval_prompt: TestIdentifiers,
+ get_humanloop_client: GetHumanloopClientFn,
+ output_not_null_evaluator: ResourceIdentifiers,
+ eval_dataset: ResourceIdentifiers,
+ eval_prompt: ResourceIdentifiers,
test_prompt_config: dict[str, Any],
id_for_staging_environment: str,
) -> None:
+ humanloop_client = get_humanloop_client()
# GIVEN a prompt deployed to staging environment
new_test_prompt_config = test_prompt_config.copy()
new_test_prompt_config["temperature"] = 1
- new_prompt_version_response = humanloop_test_client.prompts.upsert(
+ new_prompt_version_response = humanloop_client.prompts.upsert(
path=eval_prompt.file_path,
**new_test_prompt_config,
)
- humanloop_test_client.prompts.set_deployment(
+ humanloop_client.prompts.set_deployment(
id=new_prompt_version_response.id,
environment_id=id_for_staging_environment,
version_id=new_prompt_version_response.version_id,
)
# WHEN creating an evaluation using environment
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"id": new_prompt_version_response.id,
@@ -122,30 +125,31 @@ def test_eval_run_environment(
],
)
# THEN evaluation is done with the version deployed to staging environment
- evaluations_response = humanloop_test_client.evaluations.list(file_id=new_prompt_version_response.id)
+ evaluations_response = humanloop_client.evaluations.list(file_id=new_prompt_version_response.id)
assert evaluations_response.items and len(evaluations_response.items) == 1
evaluation_id = evaluations_response.items[0].id
- runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
+ runs_response = humanloop_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
assert runs_response.runs[0].status == "completed"
assert (
runs_response.runs[0].version
and runs_response.runs[0].version.version_id == new_prompt_version_response.version_id
)
- default_prompt_version_response = humanloop_test_client.prompts.get(id=new_prompt_version_response.id)
+ default_prompt_version_response = humanloop_client.prompts.get(id=new_prompt_version_response.id)
assert default_prompt_version_response.version_id != new_prompt_version_response.version_id
@pytest.mark.parametrize("version_lookup", ["version_id", "environment"])
def test_eval_run_version_lookup_fails_with_path(
- humanloop_test_client: Humanloop,
- eval_prompt: TestIdentifiers,
- eval_dataset: TestIdentifiers,
- output_not_null_evaluator: TestIdentifiers,
+ get_humanloop_client: GetHumanloopClientFn,
+ eval_prompt: ResourceIdentifiers,
+ eval_dataset: ResourceIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
version_lookup: str,
):
# GIVEN an eval run where we try to evaluate a non-default version
with pytest.raises(HumanloopRuntimeError) as e:
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client = get_humanloop_client()
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"path": eval_prompt.file_path,
@@ -167,13 +171,14 @@ def test_eval_run_version_lookup_fails_with_path(
def test_eval_run_with_version_upsert(
- humanloop_test_client: Humanloop,
- eval_prompt: TestIdentifiers,
- eval_dataset: TestIdentifiers,
- output_not_null_evaluator: TestIdentifiers,
+ get_humanloop_client: GetHumanloopClientFn,
+ eval_prompt: ResourceIdentifiers,
+ eval_dataset: ResourceIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
test_prompt_config: dict[str, Any],
):
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client = get_humanloop_client()
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"path": eval_prompt.file_path,
@@ -193,23 +198,24 @@ def test_eval_run_with_version_upsert(
],
)
# THEN the version is upserted and evaluation finishes successfully
- evaluations_response = humanloop_test_client.evaluations.list(file_id=eval_prompt.file_id)
+ evaluations_response = humanloop_client.evaluations.list(file_id=eval_prompt.file_id)
assert evaluations_response.items and len(evaluations_response.items) == 1
evaluation_id = evaluations_response.items[0].id
- runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
+ runs_response = humanloop_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
assert runs_response.runs[0].status == "completed"
# THEN a version was upserted based on file.version
- list_prompt_versions_response = humanloop_test_client.prompts.list_versions(id=eval_prompt.file_id)
+ list_prompt_versions_response = humanloop_client.prompts.list_versions(id=eval_prompt.file_id)
assert list_prompt_versions_response.records and len(list_prompt_versions_response.records) == 2
def test_flow_eval_does_not_work_without_callable(
- humanloop_test_client: Humanloop,
- eval_dataset: TestIdentifiers,
- output_not_null_evaluator: TestIdentifiers,
+ get_humanloop_client: GetHumanloopClientFn,
+ eval_dataset: ResourceIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
):
with pytest.raises(HumanloopRuntimeError) as e:
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client = get_humanloop_client()
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"path": "Test Flow",
@@ -234,28 +240,29 @@ def test_flow_eval_does_not_work_without_callable(
def test_flow_eval_works_with_callable(
- humanloop_test_client: Humanloop,
- eval_dataset: TestIdentifiers,
- output_not_null_evaluator: TestIdentifiers,
+ get_humanloop_client: GetHumanloopClientFn,
+ eval_dataset: ResourceIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
sdk_test_dir: str,
):
+ humanloop_client = get_humanloop_client()
flow_path = f"{sdk_test_dir}/Test Flow"
# GIVEN a flow with a callable
- flow_response = humanloop_test_client.flows.upsert(
+ flow_response = humanloop_client.flows.upsert(
path=flow_path,
attributes={
"foo": "bar",
},
)
try:
- flow = humanloop_test_client.flows.upsert(
+ flow = humanloop_client.flows.upsert(
path=flow_path,
attributes={
"foo": "bar",
},
)
# WHEN we run an evaluation with the flow
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"id": flow.id,
@@ -272,22 +279,23 @@ def test_flow_eval_works_with_callable(
],
)
# THEN the evaluation finishes successfully
- evaluations_response = humanloop_test_client.evaluations.list(file_id=flow.id)
+ evaluations_response = humanloop_client.evaluations.list(file_id=flow.id)
assert evaluations_response.items and len(evaluations_response.items) == 1
evaluation_id = evaluations_response.items[0].id
- runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
+ runs_response = humanloop_client.evaluations.list_runs_for_evaluation(id=evaluation_id)
assert runs_response.runs[0].status == "completed"
finally:
- humanloop_test_client.flows.delete(id=flow_response.id)
+ humanloop_client.flows.delete(id=flow_response.id)
def test_cannot_evaluate_agent_with_callable(
- humanloop_test_client: Humanloop,
- eval_dataset: TestIdentifiers,
- output_not_null_evaluator: TestIdentifiers,
+ get_humanloop_client: GetHumanloopClientFn,
+ eval_dataset: ResourceIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
):
with pytest.raises(ValueError) as e:
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client = get_humanloop_client()
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"path": "Test Agent",
@@ -307,14 +315,15 @@ def test_cannot_evaluate_agent_with_callable(
def test_flow_eval_resolves_to_default_with_callable(
- humanloop_test_client: Humanloop,
- output_not_null_evaluator: TestIdentifiers,
- eval_dataset: TestIdentifiers,
+ get_humanloop_client: GetHumanloopClientFn,
+ output_not_null_evaluator: ResourceIdentifiers,
+ eval_dataset: ResourceIdentifiers,
sdk_test_dir: str,
) -> None:
+ humanloop_client = get_humanloop_client()
# GIVEN a flow with some attributes
flow_path = f"{sdk_test_dir}/Test Flow"
- flow_response = humanloop_test_client.flows.upsert(
+ flow_response = humanloop_client.flows.upsert(
path=flow_path,
attributes={
"foo": "bar",
@@ -322,7 +331,7 @@ def test_flow_eval_resolves_to_default_with_callable(
)
try:
# WHEN running an evaluation with the flow's callable but no version
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"id": flow_response.id,
@@ -339,24 +348,24 @@ def test_flow_eval_resolves_to_default_with_callable(
],
)
# THEN the evaluation finishes successfully
- evaluations_response = humanloop_test_client.evaluations.list(file_id=flow_response.id)
+ evaluations_response = humanloop_client.evaluations.list(file_id=flow_response.id)
assert evaluations_response.items and len(evaluations_response.items) == 1
evaluation_id = evaluations_response.items and evaluations_response.items[0].id
- runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id) # type: ignore [attr-defined, arg-type]
+ runs_response = humanloop_client.evaluations.list_runs_for_evaluation(id=evaluation_id) # type: ignore [attr-defined, arg-type]
assert runs_response.runs[0].status == "completed"
finally:
# Clean up test resources
- humanloop_test_client.flows.delete(id=flow_response.id)
+ humanloop_client.flows.delete(id=flow_response.id)
-@pytest.mark.skip(reason="Skip until agents are in prod")
def test_agent_eval_works_upserting(
- humanloop_test_client: Humanloop,
- eval_dataset: TestIdentifiers,
- output_not_null_evaluator: TestIdentifiers,
+ get_humanloop_client: GetHumanloopClientFn,
+ eval_dataset: ResourceIdentifiers,
+ output_not_null_evaluator: ResourceIdentifiers,
sdk_test_dir: str,
):
- humanloop_test_client.evaluations.run( # type: ignore [attr-defined]
+ humanloop_client = get_humanloop_client()
+ humanloop_client.evaluations.run( # type: ignore [attr-defined]
name="test_eval_run",
file={
"path": f"{sdk_test_dir}/Test Agent",
@@ -387,7 +396,7 @@ def test_agent_eval_works_upserting(
}
],
)
- files_response = humanloop_test_client.files.list_files(page=1, size=100)
+ files_response = humanloop_client.files.list_files(page=1, size=100)
eval_agent = None
for file in files_response.records:
if file.path == f"{sdk_test_dir}/Test Agent":
@@ -395,8 +404,8 @@ def test_agent_eval_works_upserting(
break
assert eval_agent and eval_agent.type == "agent"
# THEN the evaluation finishes successfully
- evaluations_response = humanloop_test_client.evaluations.list(file_id=eval_agent.id)
+ evaluations_response = humanloop_client.evaluations.list(file_id=eval_agent.id)
assert evaluations_response.items and len(evaluations_response.items) == 1
evaluation_id = evaluations_response.items[0].id
- runs_response = humanloop_test_client.evaluations.list_runs_for_evaluation(id=evaluation_id) # type: ignore [attr-defined, arg-type]
+ runs_response = humanloop_client.evaluations.list_runs_for_evaluation(id=evaluation_id) # type: ignore [attr-defined, arg-type]
assert runs_response.runs[0].status == "completed"
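Across these evaluation tests the run payloads are elided by the hunk context. The minimal call shape, reconstructed from the visible fragments, looks roughly like the sketch below; the dataset= and evaluators= keyword names are assumptions inferred from the fixtures' roles.

    humanloop_client.evaluations.run(
        name="test_eval_run",
        file={"path": eval_prompt.file_path, "type": "prompt"},
        dataset={"path": eval_dataset.file_path},
        evaluators=[{"path": output_not_null_evaluator.file_path}],
    )
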
diff --git a/tests/custom/integration/test_sync.py b/tests/custom/integration/test_sync.py
new file mode 100644
index 00000000..6e7b002b
--- /dev/null
+++ b/tests/custom/integration/test_sync.py
@@ -0,0 +1,206 @@
+from typing import List, Union
+from pathlib import Path
+import pytest
+from humanloop import AgentResponse, PromptResponse
+from humanloop.prompts.client import PromptsClient
+from humanloop.agents.client import AgentsClient
+from humanloop.error import HumanloopRuntimeError
+from tests.custom.types import GetHumanloopClientFn, SyncableFile
+
+
+@pytest.fixture
+def cleanup_local_files():
+ """Cleanup any locally synced files after tests"""
+ yield
+ local_dir = Path("humanloop")
+ if local_dir.exists():
+ import shutil
+
+ shutil.rmtree(local_dir)
+
+
+def test_pull_basic(
+ syncable_files_fixture: List[SyncableFile],
+ get_humanloop_client: GetHumanloopClientFn,
+):
+ """Test that humanloop.sync() correctly syncs remote files to local filesystem"""
+ # GIVEN a set of files in the remote system (from syncable_files_fixture)
+ humanloop_client = get_humanloop_client()
+
+ # WHEN running the sync
+ humanloop_client.pull()
+
+ # THEN our local filesystem should mirror the remote filesystem in the HL Workspace
+ for file in syncable_files_fixture:
+ extension = f".{file.type}"
+ local_path = Path("humanloop") / f"{file.path}{extension}"
+
+ # THEN the file and its directory should exist
+ assert local_path.exists(), f"Expected synced file at {local_path}"
+ assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
+
+ # THEN the file should not be empty
+ content = local_path.read_text()
+ assert content, f"File at {local_path} should not be empty"
+
+
+def test_overload_with_local_files(
+ get_humanloop_client: GetHumanloopClientFn,
+ syncable_files_fixture: List[SyncableFile],
+):
+ """Test that overload_with_local_files correctly handles local files."""
+ # GIVEN a client with use_local_files=True and pulled files
+ humanloop_client = get_humanloop_client(use_local_files=True)
+ humanloop_client.pull()
+
+ # GIVEN a test file from the structure
+ test_file = syncable_files_fixture[0]
+ extension = f".{test_file.type}"
+ local_path = Path("humanloop") / f"{test_file.path}{extension}"
+
+ # THEN the file should exist locally
+ assert local_path.exists(), f"Expected pulled file at {local_path}"
+ assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
+
+ # WHEN calling the file
+ response: Union[AgentResponse, PromptResponse]
+ if test_file.type == "prompt":
+ response = humanloop_client.prompts.call( # type: ignore [assignment]
+ path=test_file.path, messages=[{"role": "user", "content": "Testing"}]
+ )
+ elif test_file.type == "agent":
+ response = humanloop_client.agents.call( # type: ignore [assignment]
+ path=test_file.path, messages=[{"role": "user", "content": "Testing"}]
+ )
+ # THEN the response should not be None
+ assert response is not None
+
+ # WHEN calling with an invalid path
+ # THEN it should raise HumanloopRuntimeError
+ with pytest.raises(HumanloopRuntimeError):
+ sub_client: Union[PromptsClient, AgentsClient]
+ match test_file.type:
+ case "prompt":
+ sub_client = humanloop_client.prompts
+ case "agent":
+ sub_client = humanloop_client.agents
+ case _:
+ raise ValueError(f"Invalid file type: {test_file.type}")
+ sub_client.call(path="invalid/path")
+
+
+def test_overload_log_with_local_files(
+ get_humanloop_client: GetHumanloopClientFn,
+ syncable_files_fixture: List[SyncableFile],
+ sdk_test_dir: str,
+):
+ """Test that overload_with_local_files correctly handles local files for log operations."""
+ # GIVEN a client with use_local_files=True and pulled files
+ humanloop_client = get_humanloop_client(use_local_files=True)
+ humanloop_client.pull()
+
+ # GIVEN a test file from the structure
+ test_file = syncable_files_fixture[0]
+ extension = f".{test_file.type}"
+ local_path = Path("humanloop") / f"{test_file.path}{extension}"
+
+ # THEN the file should exist locally
+ assert local_path.exists(), f"Expected pulled file at {local_path}"
+ assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
+
+ # WHEN logging with the pulled file
+ if test_file.type == "prompt":
+ response = humanloop_client.prompts.log( # type: ignore [assignment]
+ path=test_file.path, messages=[{"role": "user", "content": "Testing"}], output="Test response"
+ )
+ elif test_file.type == "agent":
+ response = humanloop_client.agents.log( # type: ignore [assignment]
+ path=test_file.path, messages=[{"role": "user", "content": "Testing"}], output="Test response"
+ )
+ # THEN the response should not be None
+ assert response is not None
+
+ # WHEN logging with an invalid path
+ # THEN it should raise HumanloopRuntimeError
+ with pytest.raises(HumanloopRuntimeError):
+ if test_file.type == "prompt":
+ humanloop_client.prompts.log(
+ path=f"{sdk_test_dir}/invalid/path",
+ messages=[{"role": "user", "content": "Testing"}],
+ output="Test response",
+ )
+ elif test_file.type == "agent":
+ humanloop_client.agents.log(
+ path=f"{sdk_test_dir}/invalid/path",
+ messages=[{"role": "user", "content": "Testing"}],
+ output="Test response",
+ )
+
+
+def test_overload_version_environment_handling(
+ get_humanloop_client: GetHumanloopClientFn,
+ syncable_files_fixture: List[SyncableFile],
+):
+ """Test that overload_with_local_files correctly handles version_id and environment parameters."""
+ # GIVEN a client with use_local_files=True and pulled files
+ humanloop_client = get_humanloop_client(use_local_files=True)
+ humanloop_client.pull()
+
+ # GIVEN a test file from the structure
+ test_file = syncable_files_fixture[0]
+ extension = f".{test_file.type}"
+ local_path = Path("humanloop") / f"{test_file.path}{extension}"
+
+ # THEN the file should exist locally
+ assert local_path.exists(), f"Expected pulled file at {local_path}"
+ assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
+
+ # WHEN calling with version_id
+ # THEN it should raise HumanloopRuntimeError
+ with pytest.raises(HumanloopRuntimeError, match="Cannot use local file.*version_id or environment was specified"):
+ if test_file.type == "prompt":
+ humanloop_client.prompts.call(
+ path=test_file.path,
+ version_id=test_file.version_id,
+ messages=[{"role": "user", "content": "Testing"}],
+ )
+ elif test_file.type == "agent":
+ humanloop_client.agents.call(
+ path=test_file.path,
+ version_id=test_file.version_id,
+ messages=[{"role": "user", "content": "Testing"}],
+ )
+
+ # WHEN calling with environment
+ # THEN it should raise HumanloopRuntimeError
+ with pytest.raises(HumanloopRuntimeError, match="Cannot use local file.*version_id or environment was specified"):
+ if test_file.type == "prompt":
+ humanloop_client.prompts.call(
+ path=test_file.path,
+ environment="production",
+ messages=[{"role": "user", "content": "Testing"}],
+ )
+ elif test_file.type == "agent":
+ humanloop_client.agents.call(
+ path=test_file.path,
+ environment="production",
+ messages=[{"role": "user", "content": "Testing"}],
+ )
+
+ # WHEN calling with both version_id and environment
+ # THEN it should raise HumanloopRuntimeError
+ with pytest.raises(HumanloopRuntimeError, match="Cannot use local file.*version_id or environment was specified"):
+ if test_file.type == "prompt":
+ humanloop_client.prompts.call(
+ path=test_file.path,
+ version_id=test_file.version_id,
+ environment="staging",
+ messages=[{"role": "user", "content": "Testing"}],
+ )
+ elif test_file.type == "agent":
+ humanloop_client.agents.call(
+ path=test_file.path,
+ version_id=test_file.version_id,
+ environment="staging",
+ messages=[{"role": "user", "content": "Testing"}],
+ )
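Taken together, these tests pin down pull()'s on-disk contract: given the files created by syncable_files_fixture, the local mirror under the default humanloop directory is expected to look like this, with <sdk_test_dir> standing in for the per-test directory name:

    humanloop/<sdk_test_dir>/prompts/gpt-4.prompt
    humanloop/<sdk_test_dir>/prompts/gpt-4o.prompt
    humanloop/<sdk_test_dir>/prompts/nested/complex/gpt-4o.prompt
    humanloop/<sdk_test_dir>/agents/gpt-4.agent
    humanloop/<sdk_test_dir>/agents/gpt-4o.agent
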
diff --git a/tests/custom/integration/test_sync_cli.py b/tests/custom/integration/test_sync_cli.py
new file mode 100644
index 00000000..3957aed2
--- /dev/null
+++ b/tests/custom/integration/test_sync_cli.py
@@ -0,0 +1,174 @@
+from pathlib import Path
+from unittest import mock
+import pytest
+from click.testing import CliRunner
+from humanloop.cli.__main__ import cli
+from tests.custom.types import SyncableFile
+
+
+@pytest.fixture
+def no_env_file_loading():
+ """Fixture that prevents loading API keys from any .env files.
+
+ Use this fixture in tests that verify behavior when no .env files should
+ be processed, regardless of whether they exist or not.
+ """
+ # Prevent any .env file from being loaded
+ with mock.patch("humanloop.cli.__main__.load_dotenv", lambda *args, **kwargs: None):
+ yield
+
+
+def test_pull_without_api_key(cli_runner: CliRunner, no_humanloop_api_key_in_env, no_env_file_loading):
+ """GIVEN no API key in environment
+ WHEN running pull command
+ THEN it should fail with appropriate error message
+ """
+ # WHEN running pull command
+ result = cli_runner.invoke(cli, ["pull", "--local-files-directory", "humanloop"])
+
+ # THEN it should fail with appropriate error message
+ assert result.exit_code == 1 # Our custom error code for API key issues
+ assert "No API key found" in result.output
+ assert "Set HUMANLOOP_API_KEY in .env file or environment" in result.output
+
+
+def test_pull_basic(
+ cli_runner: CliRunner,
+ syncable_files_fixture: list[SyncableFile],
+ tmp_path: Path, # this path is used as a temporary store for files locally
+):
+ # GIVEN a base directory for pulled files
+ base_dir = str(tmp_path / "humanloop")
+
+ # WHEN running pull command
+ result = cli_runner.invoke(cli, ["pull", "--local-files-directory", base_dir, "--verbose"])
+
+ # THEN it should succeed
+ assert result.exit_code == 0
+ assert "Pulling files from Humanloop..." in result.output
+ assert "Pull completed" in result.output
+
+ # THEN the files should exist locally
+ for file in syncable_files_fixture:
+ extension = f".{file.type}"
+ local_path = Path(base_dir) / f"{file.path}{extension}"
+ assert local_path.exists(), f"Expected synced file at {local_path}"
+ assert local_path.parent.exists(), f"Expected directory at {local_path.parent}"
+ assert local_path.read_text(), f"File at {local_path} should not be empty"
+
+
+def test_pull_with_specific_path(
+ cli_runner: CliRunner,
+ syncable_files_fixture: list[SyncableFile],
+ tmp_path: Path,
+):
+ """GIVEN a specific path to pull
+ WHEN running pull command with path
+ THEN it should pull only files from that path
+ """
+ # GIVEN a base directory and specific path
+ base_dir = str(tmp_path / "humanloop")
+    # The first path segment corresponds to the sdk_test_dir used within syncable_files_fixture
+    test_path = syncable_files_fixture[0].path.split("/")[0]
+
+ # WHEN running pull command with path
+ result = cli_runner.invoke(cli, ["pull", "--local-files-directory", base_dir, "--path", test_path, "--verbose"])
+
+ # THEN it should succeed and show the path
+ assert result.exit_code == 0
+ assert f"Path: {test_path}" in result.output
+
+ # THEN only files from that path should exist locally
+ for file in syncable_files_fixture:
+ extension = f".{file.type}"
+ local_path = Path(base_dir) / f"{file.path}{extension}"
+ if file.path.startswith(test_path):
+ assert local_path.exists(), f"Expected synced file at {local_path}"
+ else:
+ assert not local_path.exists(), f"Unexpected file at {local_path}"
+
+
+def test_pull_with_environment(
+ cli_runner: CliRunner,
+ syncable_files_fixture: list[SyncableFile],
+ tmp_path: Path,
+):
+ # GIVEN a base directory and environment
+ base_dir = str(tmp_path / "humanloop")
+ environment = "staging"
+
+ # WHEN running pull command with environment
+ result = cli_runner.invoke(
+ cli,
+ [
+ "pull",
+ "--local-files-directory",
+ base_dir,
+ "--environment",
+ environment,
+ "--verbose",
+ ],
+ )
+
+ # THEN it should succeed and show the environment
+ assert result.exit_code == 0
+ assert f"Environment: {environment}" in result.output
+
+
+def test_pull_with_quiet_mode(
+ cli_runner: CliRunner,
+ syncable_files_fixture: list[SyncableFile],
+ tmp_path: Path,
+):
+ # GIVEN a base directory and quiet mode
+ base_dir = str(tmp_path / "humanloop")
+
+ # WHEN running pull command with quiet mode
+ result = cli_runner.invoke(cli, ["pull", "--local-files-directory", base_dir, "--quiet"])
+
+ # THEN it should succeed but not show file list
+ assert result.exit_code == 0
+ assert "Successfully pulled" not in result.output
+
+ # THEN files should still be pulled
+ for file in syncable_files_fixture:
+ extension = f".{file.type}"
+ local_path = Path(base_dir) / f"{file.path}{extension}"
+ assert local_path.exists(), f"Expected synced file at {local_path}"
+
+
+def test_pull_with_invalid_path(
+ cli_runner: CliRunner,
+):
+    # GIVEN a remote path that does not exist
+ path = "nonexistent/path"
+
+ # WHEN running pull command
+ result = cli_runner.invoke(cli, ["pull", "--path", path])
+
+ # THEN it should fail
+ assert result.exit_code == 1
+ assert "Error" in result.output
+
+
+def test_pull_with_invalid_environment(cli_runner: CliRunner, tmp_path: Path):
+ # GIVEN an invalid environment
+ environment = "nonexistent"
+ base_dir = str(tmp_path / "humanloop")
+
+ # WHEN running pull command
+ result = cli_runner.invoke(
+ cli,
+ [
+ "pull",
+ "--local-files-directory",
+ base_dir,
+ "--environment",
+ environment,
+ "--verbose",
+ ],
+ )
+
+ # THEN it should fail
+ assert result.exit_code == 1
+ assert "Error" in result.output
diff --git a/tests/custom/otel/__init__.py b/tests/custom/otel/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/otel/test_helpers.py b/tests/custom/otel/test_helpers.py
similarity index 100%
rename from tests/otel/test_helpers.py
rename to tests/custom/otel/test_helpers.py
diff --git a/tests/custom/sync/__init__.py b/tests/custom/sync/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/custom/sync/test_client.py b/tests/custom/sync/test_client.py
new file mode 100644
index 00000000..a349fd0c
--- /dev/null
+++ b/tests/custom/sync/test_client.py
@@ -0,0 +1,126 @@
+import logging
+import pytest
+from pathlib import Path
+from unittest.mock import Mock, patch
+from humanloop.sync.sync_client import SyncClient, SerializableFileType
+from humanloop.error import HumanloopRuntimeError
+from typing import Literal
+
+
+@pytest.fixture
+def mock_client() -> Mock:
+ return Mock()
+
+
+@pytest.fixture
+def sync_client(mock_client: Mock, tmp_path: Path) -> SyncClient:
+ return SyncClient(
+ client=mock_client,
+ base_dir=str(tmp_path),
+ cache_size=10,
+        log_level=logging.DEBUG,  # DEBUG level for testing
+ )
+
+
+def test_init(sync_client: SyncClient, tmp_path: Path):
+ """Test basic initialization of SyncClient."""
+ # GIVEN a SyncClient instance
+ # THEN it should be initialized with correct base directory, cache size and file types
+ assert sync_client.base_dir == tmp_path
+ assert sync_client._cache_size == 10
+ assert sync_client.SERIALIZABLE_FILE_TYPES == frozenset(["prompt", "agent"])
+
+
+def test_normalize_path(sync_client: SyncClient):
+ """Test path normalization functionality."""
+ # GIVEN various file paths with different formats
+ test_cases = [
+ ("path/to/file.prompt", "path/to/file"),
+ ("path\\to\\file.agent", "path/to/file"),
+ ("trailing/slashes/file.agent/", "trailing/slashes/file"),
+ ("multiple//slashes//file.prompt", "multiple/slashes/file"),
+ ]
+
+ for input_path, expected in test_cases:
+ # WHEN they are normalized
+ normalized = sync_client._normalize_path(input_path)
+ # THEN they should be converted to the expected format
+ assert normalized == expected
+
+ # Test absolute path raises error
+ with pytest.raises(HumanloopRuntimeError, match="Absolute paths are not supported"):
+ sync_client._normalize_path("/leading/slashes/file.prompt")
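The cases above fully determine the normalization rules; an equivalent standalone sketch follows (the real logic lives in SyncClient._normalize_path; this version is illustrative only):

    def normalize_path(path: str) -> str:
        # Reject absolute paths up front, mirroring the tested error
        if path.startswith(("/", "\\")):
            raise ValueError("Absolute paths are not supported")
        # Unify Windows separators, then collapse duplicate and trailing slashes
        parts = [part for part in path.replace("\\", "/").split("/") if part]
        normalized = "/".join(parts)
        # Drop a trailing .prompt/.agent serialization extension if present
        for ext in (".prompt", ".agent"):
            if normalized.endswith(ext):
                return normalized[: -len(ext)]
        return normalized
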
+
+
+def test_is_file(sync_client: SyncClient):
+ """Test file type detection."""
+ # GIVEN various file paths
+ # WHEN checking if they are valid file types
+ # THEN only .prompt and .agent files should return True
+ assert sync_client.is_file("test.prompt")
+ assert sync_client.is_file("test.agent")
+ assert not sync_client.is_file("test.txt")
+ assert not sync_client.is_file("test")
+
+
+def test_save_and_read_file(sync_client: SyncClient):
+ """Test saving and reading files."""
+ # GIVEN a file content and path
+ content = "test content"
+ path = "test/path"
+ file_type: SerializableFileType = "prompt"
+
+ # WHEN saving the file
+    sync_client._save_serialized_file(content, path, file_type)
+ saved_path = sync_client.base_dir / path
+ saved_path = saved_path.parent / f"{saved_path.stem}.{file_type}"
+
+ # THEN the file should exist on disk
+ assert saved_path.exists()
+
+ # WHEN reading the file
+ read_content = sync_client.get_file_content(path, file_type)
+
+ # THEN the content should match
+ assert read_content == content
+
+
+def test_error_handling(sync_client: SyncClient):
+ """Test error handling in various scenarios."""
+ # GIVEN a nonexistent file
+ # WHEN trying to read it
+ # THEN a HumanloopRuntimeError should be raised
+ with pytest.raises(HumanloopRuntimeError, match="Local file not found"):
+ sync_client.get_file_content("nonexistent", "prompt")
+
+ # GIVEN an API error
+ # WHEN trying to pull a file
+ # THEN it should return False
+ with patch.object(sync_client.client.files, "retrieve_by_path", side_effect=Exception("API Error")):
+ assert not sync_client._pull_file("test.prompt")
+
+
+def test_cache_functionality(sync_client: SyncClient):
+ """Test LRU cache functionality."""
+ # GIVEN a test file
+ content = "test content"
+ path = "test/path"
+ file_type: Literal["prompt", "agent"] = "prompt"
+ sync_client._save_serialized_file(content, path, file_type)
+
+    # WHEN reading the file for the first time
+    sync_client.get_file_content(path, file_type)
+    # THEN the content is read from disk and the result is cached
+
+ # WHEN modifying the file on disk
+ saved_path = sync_client.base_dir / f"{path}.{file_type}"
+ saved_path.write_text("modified content")
+
+ # THEN subsequent reads should use cache
+ assert sync_client.get_file_content(path, file_type) == content
+
+ # WHEN clearing the cache
+ sync_client.clear_cache()
+
+ # THEN new content should be read from disk
+ assert sync_client.get_file_content(path, file_type) == "modified content"
diff --git a/tests/custom/test_client.py b/tests/custom/test_client.py
deleted file mode 100644
index 3e7c8334..00000000
--- a/tests/custom/test_client.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import pytest
-
-
-# Get started with writing tests with pytest at https://docs.pytest.org
-@pytest.mark.skip(reason="Unimplemented")
-def test_client() -> None:
- assert True is True
diff --git a/tests/custom/types.py b/tests/custom/types.py
new file mode 100644
index 00000000..7a198456
--- /dev/null
+++ b/tests/custom/types.py
@@ -0,0 +1,15 @@
+from typing import Protocol, NamedTuple
+from humanloop.client import Humanloop
+from humanloop import FileType
+
+
+class GetHumanloopClientFn(Protocol):
+ def __call__(self, use_local_files: bool = False) -> Humanloop: ...
+
+
+class SyncableFile(NamedTuple):
+ path: str
+ type: FileType
+ model: str
+ id: str = ""
+ version_id: str = ""
diff --git a/tests/integration/conftest.py b/tests/integration/conftest.py
deleted file mode 100644
index 72611b5d..00000000
--- a/tests/integration/conftest.py
+++ /dev/null
@@ -1,152 +0,0 @@
-from contextlib import contextmanager, redirect_stdout
-from dataclasses import dataclass
-import os
-from typing import Any, ContextManager, Generator
-import io
-from typing import TextIO
-import uuid
-import pytest
-import dotenv
-from humanloop.client import Humanloop
-
-
-@dataclass
-class TestIdentifiers:
- file_id: str
- file_path: str
-
-
-@pytest.fixture()
-def capture_stdout() -> ContextManager[TextIO]:
- @contextmanager
- def _context_manager():
- f = io.StringIO()
- with redirect_stdout(f):
- yield f
-
- return _context_manager # type: ignore [return-value]
-
-
-@pytest.fixture(scope="session")
-def openai_key() -> str:
- dotenv.load_dotenv()
- if not os.getenv("OPENAI_API_KEY"):
- pytest.fail("OPENAI_API_KEY is not set for integration tests")
- return os.getenv("OPENAI_API_KEY") # type: ignore [return-value]
-
-
-@pytest.fixture(scope="session")
-def humanloop_test_client() -> Humanloop:
- dotenv.load_dotenv()
- if not os.getenv("HUMANLOOP_API_KEY"):
- pytest.fail("HUMANLOOP_API_KEY is not set for integration tests")
- return Humanloop(api_key=os.getenv("HUMANLOOP_API_KEY")) # type: ignore [return-value]
-
-
-@pytest.fixture(scope="function")
-def sdk_test_dir(humanloop_test_client: Humanloop) -> Generator[str, None, None]:
- path = f"SDK_INTEGRATION_TEST_{uuid.uuid4()}"
- try:
- response = humanloop_test_client.directories.create(path=path)
- yield response.path
- humanloop_test_client.directories.delete(id=response.id)
- except Exception as e:
- pytest.fail(f"Failed to create directory {path}: {e}")
-
-
-@pytest.fixture(scope="function")
-def test_prompt_config() -> dict[str, Any]:
- return {
- "provider": "openai",
- "model": "gpt-4o-mini",
- "temperature": 0.5,
- "template": [
- {
- "role": "system",
- "content": "You are a helpful assistant. You must answer the user's question truthfully and at the level of a 5th grader.",
- },
- {
- "role": "user",
- "content": "{{question}}",
- },
- ],
- }
-
-
-@pytest.fixture(scope="function")
-def eval_dataset(humanloop_test_client: Humanloop, sdk_test_dir: str) -> Generator[TestIdentifiers, None, None]:
- dataset_path = f"{sdk_test_dir}/eval_dataset"
- try:
- response = humanloop_test_client.datasets.upsert(
- path=dataset_path,
- datapoints=[
- {
- "inputs": {
- "question": "What is the capital of the France?",
- },
- },
- {
- "inputs": {
- "question": "What is the capital of the Germany?",
- },
- },
- {
- "inputs": {
- "question": "What is 2+2?",
- },
- },
- ],
- )
- yield TestIdentifiers(file_id=response.id, file_path=response.path)
- humanloop_test_client.datasets.delete(id=response.id)
- except Exception as e:
- pytest.fail(f"Failed to create dataset {dataset_path}: {e}")
-
-
-@pytest.fixture(scope="function")
-def eval_prompt(
- humanloop_test_client: Humanloop, sdk_test_dir: str, openai_key: str, test_prompt_config: dict[str, Any]
-) -> Generator[TestIdentifiers, None, None]:
- prompt_path = f"{sdk_test_dir}/eval_prompt"
- try:
- response = humanloop_test_client.prompts.upsert(
- path=prompt_path,
- **test_prompt_config,
- )
- yield TestIdentifiers(file_id=response.id, file_path=response.path)
- humanloop_test_client.prompts.delete(id=response.id)
- except Exception as e:
- pytest.fail(f"Failed to create prompt {prompt_path}: {e}")
-
-
-@pytest.fixture(scope="function")
-def output_not_null_evaluator(
- humanloop_test_client: Humanloop, sdk_test_dir: str
-) -> Generator[TestIdentifiers, None, None]:
- evaluator_path = f"{sdk_test_dir}/output_not_null_evaluator"
- try:
- response = humanloop_test_client.evaluators.upsert(
- path=evaluator_path,
- spec={
- "arguments_type": "target_required",
- "return_type": "boolean",
- "code": """
-def output_not_null(log: dict) -> bool:
- return log["output"] is not None
- """,
- "evaluator_type": "python",
- },
- )
- yield TestIdentifiers(file_id=response.id, file_path=response.path)
- humanloop_test_client.evaluators.delete(id=response.id)
- except Exception as e:
- pytest.fail(f"Failed to create evaluator {evaluator_path}: {e}")
-
-
-@pytest.fixture(scope="function")
-def id_for_staging_environment(humanloop_test_client: Humanloop, eval_prompt: TestIdentifiers) -> str:
- response = humanloop_test_client.prompts.list_environments(id=eval_prompt.file_id)
- for environment in response:
- if environment.name == "staging":
- return environment.id
- pytest.fail("Staging environment not found")