From f0438cebcfc587af81d967e610dc33ea5a53bb32 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 23 Apr 2025 03:03:00 +0000
Subject: [PATCH 01/10] chore(ci): add timeout thresholds for CI jobs
---
.github/workflows/ci.yml | 2 ++
1 file changed, 2 insertions(+)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 81f6dc2..04b083c 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -10,6 +10,7 @@ on:
jobs:
lint:
+ timeout-minutes: 10
name: lint
runs-on: ubuntu-latest
steps:
@@ -30,6 +31,7 @@ jobs:
run: ./scripts/lint
test:
+ timeout-minutes: 10
name: test
runs-on: ubuntu-latest
steps:
From 57473e25e03b551ab85b4d2ec484defdcc2de09d Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 23 Apr 2025 03:03:32 +0000
Subject: [PATCH 02/10] chore(internal): import reformatting
---
src/isaacus/_client.py | 5 +----
src/isaacus/resources/classifications/universal.py | 5 +----
src/isaacus/resources/rerankings.py | 5 +----
3 files changed, 3 insertions(+), 12 deletions(-)
diff --git a/src/isaacus/_client.py b/src/isaacus/_client.py
index 99ee81b..4447c55 100644
--- a/src/isaacus/_client.py
+++ b/src/isaacus/_client.py
@@ -19,10 +19,7 @@
ProxiesTypes,
RequestOptions,
)
-from ._utils import (
- is_given,
- get_async_library,
-)
+from ._utils import is_given, get_async_library
from ._version import __version__
from .resources import rerankings
from ._streaming import Stream as Stream, AsyncStream as AsyncStream
diff --git a/src/isaacus/resources/classifications/universal.py b/src/isaacus/resources/classifications/universal.py
index fd3c76b..763cbaa 100644
--- a/src/isaacus/resources/classifications/universal.py
+++ b/src/isaacus/resources/classifications/universal.py
@@ -8,10 +8,7 @@
import httpx
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from ..._utils import (
- maybe_transform,
- async_maybe_transform,
-)
+from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
from ..._response import (
diff --git a/src/isaacus/resources/rerankings.py b/src/isaacus/resources/rerankings.py
index b32362d..35fd0a7 100644
--- a/src/isaacus/resources/rerankings.py
+++ b/src/isaacus/resources/rerankings.py
@@ -9,10 +9,7 @@
from ..types import reranking_create_params
from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
-from .._utils import (
- maybe_transform,
- async_maybe_transform,
-)
+from .._utils import maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import (
From 6dc4e32ab00e83d2307bfb729222f66f24a1f45f Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 23 Apr 2025 03:04:52 +0000
Subject: [PATCH 03/10] chore(internal): fix list file params
---
src/isaacus/_utils/_utils.py | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/src/isaacus/_utils/_utils.py b/src/isaacus/_utils/_utils.py
index e5811bb..ea3cf3f 100644
--- a/src/isaacus/_utils/_utils.py
+++ b/src/isaacus/_utils/_utils.py
@@ -72,8 +72,16 @@ def _extract_items(
from .._files import assert_is_file_content
# We have exhausted the path, return the entry we found.
- assert_is_file_content(obj, key=flattened_key)
assert flattened_key is not None
+
+ if is_list(obj):
+ files: list[tuple[str, FileTypes]] = []
+ for entry in obj:
+ assert_is_file_content(entry, key=flattened_key + "[]" if flattened_key else "")
+ files.append((flattened_key + "[]", cast(FileTypes, entry)))
+ return files
+
+ assert_is_file_content(obj, key=flattened_key)
return [(flattened_key, cast(FileTypes, obj))]
index += 1
From 513599ce261e2ec9a034715e20ec150025186255 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 23 Apr 2025 03:05:34 +0000
Subject: [PATCH 04/10] chore(internal): refactor retries to not use recursion
---
src/isaacus/_base_client.py | 414 +++++++++++++++---------------------
1 file changed, 175 insertions(+), 239 deletions(-)
diff --git a/src/isaacus/_base_client.py b/src/isaacus/_base_client.py
index 34d082e..531d5b9 100644
--- a/src/isaacus/_base_client.py
+++ b/src/isaacus/_base_client.py
@@ -412,8 +412,7 @@ def _build_headers(self, options: FinalRequestOptions, *, retries_taken: int = 0
headers = httpx.Headers(headers_dict)
idempotency_header = self._idempotency_header
- if idempotency_header and options.method.lower() != "get" and idempotency_header not in headers:
- options.idempotency_key = options.idempotency_key or self._idempotency_key()
+ if idempotency_header and options.idempotency_key and idempotency_header not in headers:
headers[idempotency_header] = options.idempotency_key
# Don't set these headers if they were already set or removed by the caller. We check
@@ -878,7 +877,6 @@ def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
- remaining_retries: Optional[int] = None,
*,
stream: Literal[True],
stream_cls: Type[_StreamT],
@@ -889,7 +887,6 @@ def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
- remaining_retries: Optional[int] = None,
*,
stream: Literal[False] = False,
) -> ResponseT: ...
@@ -899,7 +896,6 @@ def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
- remaining_retries: Optional[int] = None,
*,
stream: bool = False,
stream_cls: Type[_StreamT] | None = None,
@@ -909,125 +905,109 @@ def request(
self,
cast_to: Type[ResponseT],
options: FinalRequestOptions,
- remaining_retries: Optional[int] = None,
*,
stream: bool = False,
stream_cls: type[_StreamT] | None = None,
) -> ResponseT | _StreamT:
- if remaining_retries is not None:
- retries_taken = options.get_max_retries(self.max_retries) - remaining_retries
- else:
- retries_taken = 0
-
- return self._request(
- cast_to=cast_to,
- options=options,
- stream=stream,
- stream_cls=stream_cls,
- retries_taken=retries_taken,
- )
+ cast_to = self._maybe_override_cast_to(cast_to, options)
- def _request(
- self,
- *,
- cast_to: Type[ResponseT],
- options: FinalRequestOptions,
- retries_taken: int,
- stream: bool,
- stream_cls: type[_StreamT] | None,
- ) -> ResponseT | _StreamT:
# create a copy of the options we were given so that if the
# options are mutated later & we then retry, the retries are
# given the original options
input_options = model_copy(options)
-
- cast_to = self._maybe_override_cast_to(cast_to, options)
- options = self._prepare_options(options)
-
- remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
- request = self._build_request(options, retries_taken=retries_taken)
- self._prepare_request(request)
-
- if options.idempotency_key:
+ if input_options.idempotency_key is None and input_options.method.lower() != "get":
# ensure the idempotency key is reused between requests
- input_options.idempotency_key = options.idempotency_key
+ input_options.idempotency_key = self._idempotency_key()
- kwargs: HttpxSendArgs = {}
- if self.custom_auth is not None:
- kwargs["auth"] = self.custom_auth
+ response: httpx.Response | None = None
+ max_retries = input_options.get_max_retries(self.max_retries)
- log.debug("Sending HTTP Request: %s %s", request.method, request.url)
+ retries_taken = 0
+ for retries_taken in range(max_retries + 1):
+ options = model_copy(input_options)
+ options = self._prepare_options(options)
- try:
- response = self._client.send(
- request,
- stream=stream or self._should_stream_response_body(request=request),
- **kwargs,
- )
- except httpx.TimeoutException as err:
- log.debug("Encountered httpx.TimeoutException", exc_info=True)
+ remaining_retries = max_retries - retries_taken
+ request = self._build_request(options, retries_taken=retries_taken)
+ self._prepare_request(request)
- if remaining_retries > 0:
- return self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- stream=stream,
- stream_cls=stream_cls,
- response_headers=None,
- )
+ kwargs: HttpxSendArgs = {}
+ if self.custom_auth is not None:
+ kwargs["auth"] = self.custom_auth
- log.debug("Raising timeout error")
- raise APITimeoutError(request=request) from err
- except Exception as err:
- log.debug("Encountered Exception", exc_info=True)
+ log.debug("Sending HTTP Request: %s %s", request.method, request.url)
- if remaining_retries > 0:
- return self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- stream=stream,
- stream_cls=stream_cls,
- response_headers=None,
+ response = None
+ try:
+ response = self._client.send(
+ request,
+ stream=stream or self._should_stream_response_body(request=request),
+ **kwargs,
)
+ except httpx.TimeoutException as err:
+ log.debug("Encountered httpx.TimeoutException", exc_info=True)
+
+ if remaining_retries > 0:
+ self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=None,
+ )
+ continue
+
+ log.debug("Raising timeout error")
+ raise APITimeoutError(request=request) from err
+ except Exception as err:
+ log.debug("Encountered Exception", exc_info=True)
+
+ if remaining_retries > 0:
+ self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=None,
+ )
+ continue
+
+ log.debug("Raising connection error")
+ raise APIConnectionError(request=request) from err
+
+ log.debug(
+ 'HTTP Response: %s %s "%i %s" %s',
+ request.method,
+ request.url,
+ response.status_code,
+ response.reason_phrase,
+ response.headers,
+ )
- log.debug("Raising connection error")
- raise APIConnectionError(request=request) from err
-
- log.debug(
- 'HTTP Response: %s %s "%i %s" %s',
- request.method,
- request.url,
- response.status_code,
- response.reason_phrase,
- response.headers,
- )
+ try:
+ response.raise_for_status()
+ except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
+ log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
+
+ if remaining_retries > 0 and self._should_retry(err.response):
+ err.response.close()
+ self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=response,
+ )
+ continue
- try:
- response.raise_for_status()
- except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
- log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
-
- if remaining_retries > 0 and self._should_retry(err.response):
- err.response.close()
- return self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- response_headers=err.response.headers,
- stream=stream,
- stream_cls=stream_cls,
- )
+ # If the response is streamed then we need to explicitly read the response
+ # to completion before attempting to access the response text.
+ if not err.response.is_closed:
+ err.response.read()
- # If the response is streamed then we need to explicitly read the response
- # to completion before attempting to access the response text.
- if not err.response.is_closed:
- err.response.read()
+ log.debug("Re-raising status error")
+ raise self._make_status_error_from_response(err.response) from None
- log.debug("Re-raising status error")
- raise self._make_status_error_from_response(err.response) from None
+ break
+ assert response is not None, "could not resolve response (should never happen)"
return self._process_response(
cast_to=cast_to,
options=options,
@@ -1037,37 +1017,20 @@ def _request(
retries_taken=retries_taken,
)
- def _retry_request(
- self,
- options: FinalRequestOptions,
- cast_to: Type[ResponseT],
- *,
- retries_taken: int,
- response_headers: httpx.Headers | None,
- stream: bool,
- stream_cls: type[_StreamT] | None,
- ) -> ResponseT | _StreamT:
- remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
+ def _sleep_for_retry(
+ self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None
+ ) -> None:
+ remaining_retries = max_retries - retries_taken
if remaining_retries == 1:
log.debug("1 retry left")
else:
log.debug("%i retries left", remaining_retries)
- timeout = self._calculate_retry_timeout(remaining_retries, options, response_headers)
+ timeout = self._calculate_retry_timeout(remaining_retries, options, response.headers if response else None)
log.info("Retrying request to %s in %f seconds", options.url, timeout)
- # In a synchronous context we are blocking the entire thread. Up to the library user to run the client in a
- # different thread if necessary.
time.sleep(timeout)
- return self._request(
- options=options,
- cast_to=cast_to,
- retries_taken=retries_taken + 1,
- stream=stream,
- stream_cls=stream_cls,
- )
-
def _process_response(
self,
*,
@@ -1411,7 +1374,6 @@ async def request(
options: FinalRequestOptions,
*,
stream: Literal[False] = False,
- remaining_retries: Optional[int] = None,
) -> ResponseT: ...
@overload
@@ -1422,7 +1384,6 @@ async def request(
*,
stream: Literal[True],
stream_cls: type[_AsyncStreamT],
- remaining_retries: Optional[int] = None,
) -> _AsyncStreamT: ...
@overload
@@ -1433,7 +1394,6 @@ async def request(
*,
stream: bool,
stream_cls: type[_AsyncStreamT] | None = None,
- remaining_retries: Optional[int] = None,
) -> ResponseT | _AsyncStreamT: ...
async def request(
@@ -1443,120 +1403,111 @@ async def request(
*,
stream: bool = False,
stream_cls: type[_AsyncStreamT] | None = None,
- remaining_retries: Optional[int] = None,
- ) -> ResponseT | _AsyncStreamT:
- if remaining_retries is not None:
- retries_taken = options.get_max_retries(self.max_retries) - remaining_retries
- else:
- retries_taken = 0
-
- return await self._request(
- cast_to=cast_to,
- options=options,
- stream=stream,
- stream_cls=stream_cls,
- retries_taken=retries_taken,
- )
-
- async def _request(
- self,
- cast_to: Type[ResponseT],
- options: FinalRequestOptions,
- *,
- stream: bool,
- stream_cls: type[_AsyncStreamT] | None,
- retries_taken: int,
) -> ResponseT | _AsyncStreamT:
if self._platform is None:
# `get_platform` can make blocking IO calls so we
# execute it earlier while we are in an async context
self._platform = await asyncify(get_platform)()
+ cast_to = self._maybe_override_cast_to(cast_to, options)
+
# create a copy of the options we were given so that if the
# options are mutated later & we then retry, the retries are
# given the original options
input_options = model_copy(options)
-
- cast_to = self._maybe_override_cast_to(cast_to, options)
- options = await self._prepare_options(options)
-
- remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
- request = self._build_request(options, retries_taken=retries_taken)
- await self._prepare_request(request)
-
- if options.idempotency_key:
+ if input_options.idempotency_key is None and input_options.method.lower() != "get":
# ensure the idempotency key is reused between requests
- input_options.idempotency_key = options.idempotency_key
+ input_options.idempotency_key = self._idempotency_key()
- kwargs: HttpxSendArgs = {}
- if self.custom_auth is not None:
- kwargs["auth"] = self.custom_auth
+ response: httpx.Response | None = None
+ max_retries = input_options.get_max_retries(self.max_retries)
- try:
- response = await self._client.send(
- request,
- stream=stream or self._should_stream_response_body(request=request),
- **kwargs,
- )
- except httpx.TimeoutException as err:
- log.debug("Encountered httpx.TimeoutException", exc_info=True)
+ retries_taken = 0
+ for retries_taken in range(max_retries + 1):
+ options = model_copy(input_options)
+ options = await self._prepare_options(options)
- if remaining_retries > 0:
- return await self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- stream=stream,
- stream_cls=stream_cls,
- response_headers=None,
- )
+ remaining_retries = max_retries - retries_taken
+ request = self._build_request(options, retries_taken=retries_taken)
+ await self._prepare_request(request)
- log.debug("Raising timeout error")
- raise APITimeoutError(request=request) from err
- except Exception as err:
- log.debug("Encountered Exception", exc_info=True)
+ kwargs: HttpxSendArgs = {}
+ if self.custom_auth is not None:
+ kwargs["auth"] = self.custom_auth
- if remaining_retries > 0:
- return await self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- stream=stream,
- stream_cls=stream_cls,
- response_headers=None,
- )
+ log.debug("Sending HTTP Request: %s %s", request.method, request.url)
- log.debug("Raising connection error")
- raise APIConnectionError(request=request) from err
+ response = None
+ try:
+ response = await self._client.send(
+ request,
+ stream=stream or self._should_stream_response_body(request=request),
+ **kwargs,
+ )
+ except httpx.TimeoutException as err:
+ log.debug("Encountered httpx.TimeoutException", exc_info=True)
+
+ if remaining_retries > 0:
+ await self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=None,
+ )
+ continue
+
+ log.debug("Raising timeout error")
+ raise APITimeoutError(request=request) from err
+ except Exception as err:
+ log.debug("Encountered Exception", exc_info=True)
+
+ if remaining_retries > 0:
+ await self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=None,
+ )
+ continue
+
+ log.debug("Raising connection error")
+ raise APIConnectionError(request=request) from err
+
+ log.debug(
+ 'HTTP Response: %s %s "%i %s" %s',
+ request.method,
+ request.url,
+ response.status_code,
+ response.reason_phrase,
+ response.headers,
+ )
- log.debug(
- 'HTTP Request: %s %s "%i %s"', request.method, request.url, response.status_code, response.reason_phrase
- )
+ try:
+ response.raise_for_status()
+ except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
+ log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
+
+ if remaining_retries > 0 and self._should_retry(err.response):
+ await err.response.aclose()
+ await self._sleep_for_retry(
+ retries_taken=retries_taken,
+ max_retries=max_retries,
+ options=input_options,
+ response=response,
+ )
+ continue
- try:
- response.raise_for_status()
- except httpx.HTTPStatusError as err: # thrown on 4xx and 5xx status code
- log.debug("Encountered httpx.HTTPStatusError", exc_info=True)
-
- if remaining_retries > 0 and self._should_retry(err.response):
- await err.response.aclose()
- return await self._retry_request(
- input_options,
- cast_to,
- retries_taken=retries_taken,
- response_headers=err.response.headers,
- stream=stream,
- stream_cls=stream_cls,
- )
+ # If the response is streamed then we need to explicitly read the response
+ # to completion before attempting to access the response text.
+ if not err.response.is_closed:
+ await err.response.aread()
- # If the response is streamed then we need to explicitly read the response
- # to completion before attempting to access the response text.
- if not err.response.is_closed:
- await err.response.aread()
+ log.debug("Re-raising status error")
+ raise self._make_status_error_from_response(err.response) from None
- log.debug("Re-raising status error")
- raise self._make_status_error_from_response(err.response) from None
+ break
+ assert response is not None, "could not resolve response (should never happen)"
return await self._process_response(
cast_to=cast_to,
options=options,
@@ -1566,35 +1517,20 @@ async def _request(
retries_taken=retries_taken,
)
- async def _retry_request(
- self,
- options: FinalRequestOptions,
- cast_to: Type[ResponseT],
- *,
- retries_taken: int,
- response_headers: httpx.Headers | None,
- stream: bool,
- stream_cls: type[_AsyncStreamT] | None,
- ) -> ResponseT | _AsyncStreamT:
- remaining_retries = options.get_max_retries(self.max_retries) - retries_taken
+ async def _sleep_for_retry(
+ self, *, retries_taken: int, max_retries: int, options: FinalRequestOptions, response: httpx.Response | None
+ ) -> None:
+ remaining_retries = max_retries - retries_taken
if remaining_retries == 1:
log.debug("1 retry left")
else:
log.debug("%i retries left", remaining_retries)
- timeout = self._calculate_retry_timeout(remaining_retries, options, response_headers)
+ timeout = self._calculate_retry_timeout(remaining_retries, options, response.headers if response else None)
log.info("Retrying request to %s in %f seconds", options.url, timeout)
await anyio.sleep(timeout)
- return await self._request(
- options=options,
- cast_to=cast_to,
- retries_taken=retries_taken + 1,
- stream=stream,
- stream_cls=stream_cls,
- )
-
async def _process_response(
self,
*,
From 40be0d5d7bb0c4d5187c0207e6470800e9827216 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 23 Apr 2025 03:06:10 +0000
Subject: [PATCH 05/10] fix(pydantic v1): more robust ModelField.annotation
check
---
src/isaacus/_models.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/isaacus/_models.py b/src/isaacus/_models.py
index 58b9263..798956f 100644
--- a/src/isaacus/_models.py
+++ b/src/isaacus/_models.py
@@ -626,8 +626,8 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any,
# Note: if one variant defines an alias then they all should
discriminator_alias = field_info.alias
- if field_info.annotation and is_literal_type(field_info.annotation):
- for entry in get_args(field_info.annotation):
+ if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation):
+ for entry in get_args(annotation):
if isinstance(entry, str):
mapping[entry] = variant
From 8860ae0393429d660038ce1c8d15020a42141979 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 24 Apr 2025 02:25:57 +0000
Subject: [PATCH 06/10] chore(internal): codegen related update
---
.github/workflows/ci.yml | 16 ++++++++--------
.github/workflows/publish-pypi.yml | 2 +-
.github/workflows/release-doctor.yml | 2 +-
3 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 04b083c..3382042 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,18 +1,18 @@
name: CI
on:
push:
- branches:
- - main
- pull_request:
- branches:
- - main
- - next
+ branches-ignore:
+ - 'generated'
+ - 'codegen/**'
+ - 'integrated/**'
+ - 'stl-preview-head/**'
+ - 'stl-preview-base/**'
jobs:
lint:
timeout-minutes: 10
name: lint
- runs-on: ubuntu-latest
+ runs-on: depot-ubuntu-24.04
steps:
- uses: actions/checkout@v4
@@ -33,7 +33,7 @@ jobs:
test:
timeout-minutes: 10
name: test
- runs-on: ubuntu-latest
+ runs-on: depot-ubuntu-24.04
steps:
- uses: actions/checkout@v4
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
index 04538cd..b416079 100644
--- a/.github/workflows/publish-pypi.yml
+++ b/.github/workflows/publish-pypi.yml
@@ -11,7 +11,7 @@ on:
jobs:
publish:
name: publish
- runs-on: ubuntu-latest
+ runs-on: depot-ubuntu-24.04
steps:
- uses: actions/checkout@v4
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
index e1eed33..f1244dd 100644
--- a/.github/workflows/release-doctor.yml
+++ b/.github/workflows/release-doctor.yml
@@ -8,7 +8,7 @@ on:
jobs:
release_doctor:
name: release doctor
- runs-on: ubuntu-latest
+ runs-on: depot-ubuntu-24.04
if: github.repository == 'isaacus-dev/isaacus-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
steps:
From 869c0ff5824ccfd63a4123a026530df11352db44 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 24 Apr 2025 02:26:26 +0000
Subject: [PATCH 07/10] chore(ci): only use depot for staging repos
---
.github/workflows/ci.yml | 4 ++--
.github/workflows/publish-pypi.yml | 2 +-
.github/workflows/release-doctor.yml | 2 +-
3 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 3382042..d8ec396 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -12,7 +12,7 @@ jobs:
lint:
timeout-minutes: 10
name: lint
- runs-on: depot-ubuntu-24.04
+ runs-on: ${{ github.repository == 'stainless-sdks/isaacus-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
steps:
- uses: actions/checkout@v4
@@ -33,7 +33,7 @@ jobs:
test:
timeout-minutes: 10
name: test
- runs-on: depot-ubuntu-24.04
+ runs-on: ${{ github.repository == 'stainless-sdks/isaacus-python' && 'depot-ubuntu-24.04' || 'ubuntu-latest' }}
steps:
- uses: actions/checkout@v4
diff --git a/.github/workflows/publish-pypi.yml b/.github/workflows/publish-pypi.yml
index b416079..04538cd 100644
--- a/.github/workflows/publish-pypi.yml
+++ b/.github/workflows/publish-pypi.yml
@@ -11,7 +11,7 @@ on:
jobs:
publish:
name: publish
- runs-on: depot-ubuntu-24.04
+ runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
diff --git a/.github/workflows/release-doctor.yml b/.github/workflows/release-doctor.yml
index f1244dd..e1eed33 100644
--- a/.github/workflows/release-doctor.yml
+++ b/.github/workflows/release-doctor.yml
@@ -8,7 +8,7 @@ on:
jobs:
release_doctor:
name: release doctor
- runs-on: depot-ubuntu-24.04
+ runs-on: ubuntu-latest
if: github.repository == 'isaacus-dev/isaacus-python' && (github.event_name == 'push' || github.event_name == 'workflow_dispatch' || startsWith(github.head_ref, 'release-please') || github.head_ref == 'next')
steps:
From ef18419dc26bba05aec8f5e29711bcc6fe329e9e Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Thu, 24 Apr 2025 02:27:40 +0000
Subject: [PATCH 08/10] chore: broadly detect json family of content-type
headers
---
src/isaacus/_response.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/isaacus/_response.py b/src/isaacus/_response.py
index cce88fd..392418a 100644
--- a/src/isaacus/_response.py
+++ b/src/isaacus/_response.py
@@ -233,7 +233,7 @@ def _parse(self, *, to: type[_T] | None = None) -> R | _T:
# split is required to handle cases where additional information is included
# in the response, e.g. application/json; charset=utf-8
content_type, *_ = response.headers.get("content-type", "*").split(";")
- if content_type != "application/json":
+ if not content_type.endswith("json"):
if is_basemodel(cast_to):
try:
data = response.json()
From 7b9856c7a64fd4694d0fe8436934fa520faa38cc Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 30 Apr 2025 08:13:27 +0000
Subject: [PATCH 09/10] feat(api): introduced extractive QA
---
.stats.yml | 8 +-
api.md | 14 +
src/isaacus/_client.py | 9 +
src/isaacus/resources/__init__.py | 14 +
.../resources/classifications/universal.py | 4 +-
src/isaacus/resources/extractions/__init__.py | 33 +++
.../resources/extractions/extractions.py | 102 +++++++
src/isaacus/resources/extractions/qa.py | 258 ++++++++++++++++++
src/isaacus/resources/rerankings.py | 4 +-
.../universal_create_params.py | 2 +-
src/isaacus/types/extractions/__init__.py | 6 +
.../types/extractions/answer_extraction.py | 71 +++++
.../types/extractions/qa_create_params.py | 64 +++++
src/isaacus/types/reranking_create_params.py | 2 +-
tests/api_resources/extractions/__init__.py | 1 +
tests/api_resources/extractions/test_qa.py | 152 +++++++++++
16 files changed, 734 insertions(+), 10 deletions(-)
create mode 100644 src/isaacus/resources/extractions/__init__.py
create mode 100644 src/isaacus/resources/extractions/extractions.py
create mode 100644 src/isaacus/resources/extractions/qa.py
create mode 100644 src/isaacus/types/extractions/__init__.py
create mode 100644 src/isaacus/types/extractions/answer_extraction.py
create mode 100644 src/isaacus/types/extractions/qa_create_params.py
create mode 100644 tests/api_resources/extractions/__init__.py
create mode 100644 tests/api_resources/extractions/test_qa.py
diff --git a/.stats.yml b/.stats.yml
index da5fed2..1973072 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 2
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/isaacus%2Fisaacus-f1ac3a46ba560544c348931204998163a9c5e35c40d6096f4b078e38c2714756.yml
-openapi_spec_hash: c9ba9f53c0dabdb703461d772020952f
-config_hash: 1d15d860383a3f6da1ac388297687cc9
+configured_endpoints: 3
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/isaacus%2Fisaacus-bc0272bf0a53ec37ca25f4a8b76bca4b90cf33c5f41816edf460daa642a5a84a.yml
+openapi_spec_hash: 79f28fedf89e1b719f464c064847a630
+config_hash: bfe30148ec88e8bbbf4a348a9fdfc00a
diff --git a/api.md b/api.md
index 58ab695..a3a6143 100644
--- a/api.md
+++ b/api.md
@@ -23,3 +23,17 @@ from isaacus.types import Reranking
Methods:
- client.rerankings.create(\*\*params) -> Reranking
+
+# Extractions
+
+## Qa
+
+Types:
+
+```python
+from isaacus.types.extractions import AnswerExtraction
+```
+
+Methods:
+
+- client.extractions.qa.create(\*\*params) -> AnswerExtraction
diff --git a/src/isaacus/_client.py b/src/isaacus/_client.py
index 4447c55..35105e6 100644
--- a/src/isaacus/_client.py
+++ b/src/isaacus/_client.py
@@ -29,6 +29,7 @@
SyncAPIClient,
AsyncAPIClient,
)
+from .resources.extractions import extractions
from .resources.classifications import classifications
__all__ = ["Timeout", "Transport", "ProxiesTypes", "RequestOptions", "Isaacus", "AsyncIsaacus", "Client", "AsyncClient"]
@@ -37,6 +38,7 @@
class Isaacus(SyncAPIClient):
classifications: classifications.ClassificationsResource
rerankings: rerankings.RerankingsResource
+ extractions: extractions.ExtractionsResource
with_raw_response: IsaacusWithRawResponse
with_streaming_response: IsaacusWithStreamedResponse
@@ -96,6 +98,7 @@ def __init__(
self.classifications = classifications.ClassificationsResource(self)
self.rerankings = rerankings.RerankingsResource(self)
+ self.extractions = extractions.ExtractionsResource(self)
self.with_raw_response = IsaacusWithRawResponse(self)
self.with_streaming_response = IsaacusWithStreamedResponse(self)
@@ -207,6 +210,7 @@ def _make_status_error(
class AsyncIsaacus(AsyncAPIClient):
classifications: classifications.AsyncClassificationsResource
rerankings: rerankings.AsyncRerankingsResource
+ extractions: extractions.AsyncExtractionsResource
with_raw_response: AsyncIsaacusWithRawResponse
with_streaming_response: AsyncIsaacusWithStreamedResponse
@@ -266,6 +270,7 @@ def __init__(
self.classifications = classifications.AsyncClassificationsResource(self)
self.rerankings = rerankings.AsyncRerankingsResource(self)
+ self.extractions = extractions.AsyncExtractionsResource(self)
self.with_raw_response = AsyncIsaacusWithRawResponse(self)
self.with_streaming_response = AsyncIsaacusWithStreamedResponse(self)
@@ -378,24 +383,28 @@ class IsaacusWithRawResponse:
def __init__(self, client: Isaacus) -> None:
self.classifications = classifications.ClassificationsResourceWithRawResponse(client.classifications)
self.rerankings = rerankings.RerankingsResourceWithRawResponse(client.rerankings)
+ self.extractions = extractions.ExtractionsResourceWithRawResponse(client.extractions)
class AsyncIsaacusWithRawResponse:
def __init__(self, client: AsyncIsaacus) -> None:
self.classifications = classifications.AsyncClassificationsResourceWithRawResponse(client.classifications)
self.rerankings = rerankings.AsyncRerankingsResourceWithRawResponse(client.rerankings)
+ self.extractions = extractions.AsyncExtractionsResourceWithRawResponse(client.extractions)
class IsaacusWithStreamedResponse:
def __init__(self, client: Isaacus) -> None:
self.classifications = classifications.ClassificationsResourceWithStreamingResponse(client.classifications)
self.rerankings = rerankings.RerankingsResourceWithStreamingResponse(client.rerankings)
+ self.extractions = extractions.ExtractionsResourceWithStreamingResponse(client.extractions)
class AsyncIsaacusWithStreamedResponse:
def __init__(self, client: AsyncIsaacus) -> None:
self.classifications = classifications.AsyncClassificationsResourceWithStreamingResponse(client.classifications)
self.rerankings = rerankings.AsyncRerankingsResourceWithStreamingResponse(client.rerankings)
+ self.extractions = extractions.AsyncExtractionsResourceWithStreamingResponse(client.extractions)
Client = Isaacus
diff --git a/src/isaacus/resources/__init__.py b/src/isaacus/resources/__init__.py
index 3cc3129..e55176e 100644
--- a/src/isaacus/resources/__init__.py
+++ b/src/isaacus/resources/__init__.py
@@ -8,6 +8,14 @@
RerankingsResourceWithStreamingResponse,
AsyncRerankingsResourceWithStreamingResponse,
)
+from .extractions import (
+ ExtractionsResource,
+ AsyncExtractionsResource,
+ ExtractionsResourceWithRawResponse,
+ AsyncExtractionsResourceWithRawResponse,
+ ExtractionsResourceWithStreamingResponse,
+ AsyncExtractionsResourceWithStreamingResponse,
+)
from .classifications import (
ClassificationsResource,
AsyncClassificationsResource,
@@ -30,4 +38,10 @@
"AsyncRerankingsResourceWithRawResponse",
"RerankingsResourceWithStreamingResponse",
"AsyncRerankingsResourceWithStreamingResponse",
+ "ExtractionsResource",
+ "AsyncExtractionsResource",
+ "ExtractionsResourceWithRawResponse",
+ "AsyncExtractionsResourceWithRawResponse",
+ "ExtractionsResourceWithStreamingResponse",
+ "AsyncExtractionsResourceWithStreamingResponse",
]
diff --git a/src/isaacus/resources/classifications/universal.py b/src/isaacus/resources/classifications/universal.py
index 763cbaa..54fb538 100644
--- a/src/isaacus/resources/classifications/universal.py
+++ b/src/isaacus/resources/classifications/universal.py
@@ -78,7 +78,7 @@ def create(
texts: The texts to classify.
- The texts must contain at least one non-whitespace character.
+ Each text must contain at least one non-whitespace character.
chunking_options: Options for how to split text into smaller chunks.
@@ -179,7 +179,7 @@ async def create(
texts: The texts to classify.
- The texts must contain at least one non-whitespace character.
+ Each text must contain at least one non-whitespace character.
chunking_options: Options for how to split text into smaller chunks.
diff --git a/src/isaacus/resources/extractions/__init__.py b/src/isaacus/resources/extractions/__init__.py
new file mode 100644
index 0000000..820c06a
--- /dev/null
+++ b/src/isaacus/resources/extractions/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .qa import (
+ QaResource,
+ AsyncQaResource,
+ QaResourceWithRawResponse,
+ AsyncQaResourceWithRawResponse,
+ QaResourceWithStreamingResponse,
+ AsyncQaResourceWithStreamingResponse,
+)
+from .extractions import (
+ ExtractionsResource,
+ AsyncExtractionsResource,
+ ExtractionsResourceWithRawResponse,
+ AsyncExtractionsResourceWithRawResponse,
+ ExtractionsResourceWithStreamingResponse,
+ AsyncExtractionsResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "QaResource",
+ "AsyncQaResource",
+ "QaResourceWithRawResponse",
+ "AsyncQaResourceWithRawResponse",
+ "QaResourceWithStreamingResponse",
+ "AsyncQaResourceWithStreamingResponse",
+ "ExtractionsResource",
+ "AsyncExtractionsResource",
+ "ExtractionsResourceWithRawResponse",
+ "AsyncExtractionsResourceWithRawResponse",
+ "ExtractionsResourceWithStreamingResponse",
+ "AsyncExtractionsResourceWithStreamingResponse",
+]
diff --git a/src/isaacus/resources/extractions/extractions.py b/src/isaacus/resources/extractions/extractions.py
new file mode 100644
index 0000000..a967336
--- /dev/null
+++ b/src/isaacus/resources/extractions/extractions.py
@@ -0,0 +1,102 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .qa import (
+ QaResource,
+ AsyncQaResource,
+ QaResourceWithRawResponse,
+ AsyncQaResourceWithRawResponse,
+ QaResourceWithStreamingResponse,
+ AsyncQaResourceWithStreamingResponse,
+)
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+
+__all__ = ["ExtractionsResource", "AsyncExtractionsResource"]
+
+
+class ExtractionsResource(SyncAPIResource):
+ @cached_property
+ def qa(self) -> QaResource:
+ return QaResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> ExtractionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/isaacus-dev/isaacus-python#accessing-raw-response-data-eg-headers
+ """
+ return ExtractionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ExtractionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/isaacus-dev/isaacus-python#with_streaming_response
+ """
+ return ExtractionsResourceWithStreamingResponse(self)
+
+
+class AsyncExtractionsResource(AsyncAPIResource):
+ @cached_property
+ def qa(self) -> AsyncQaResource:
+ return AsyncQaResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncExtractionsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/isaacus-dev/isaacus-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncExtractionsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncExtractionsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/isaacus-dev/isaacus-python#with_streaming_response
+ """
+ return AsyncExtractionsResourceWithStreamingResponse(self)
+
+
+class ExtractionsResourceWithRawResponse:
+ def __init__(self, extractions: ExtractionsResource) -> None:
+ self._extractions = extractions
+
+ @cached_property
+ def qa(self) -> QaResourceWithRawResponse:
+ return QaResourceWithRawResponse(self._extractions.qa)
+
+
+class AsyncExtractionsResourceWithRawResponse:
+ def __init__(self, extractions: AsyncExtractionsResource) -> None:
+ self._extractions = extractions
+
+ @cached_property
+ def qa(self) -> AsyncQaResourceWithRawResponse:
+ return AsyncQaResourceWithRawResponse(self._extractions.qa)
+
+
+class ExtractionsResourceWithStreamingResponse:
+ def __init__(self, extractions: ExtractionsResource) -> None:
+ self._extractions = extractions
+
+ @cached_property
+ def qa(self) -> QaResourceWithStreamingResponse:
+ return QaResourceWithStreamingResponse(self._extractions.qa)
+
+
+class AsyncExtractionsResourceWithStreamingResponse:
+ def __init__(self, extractions: AsyncExtractionsResource) -> None:
+ self._extractions = extractions
+
+ @cached_property
+ def qa(self) -> AsyncQaResourceWithStreamingResponse:
+ return AsyncQaResourceWithStreamingResponse(self._extractions.qa)
diff --git a/src/isaacus/resources/extractions/qa.py b/src/isaacus/resources/extractions/qa.py
new file mode 100644
index 0000000..bda699f
--- /dev/null
+++ b/src/isaacus/resources/extractions/qa.py
@@ -0,0 +1,258 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+import httpx
+
+from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.extractions import qa_create_params
+from ...types.extractions.answer_extraction import AnswerExtraction
+
+__all__ = ["QaResource", "AsyncQaResource"]
+
+
+class QaResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> QaResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/isaacus-dev/isaacus-python#accessing-raw-response-data-eg-headers
+ """
+ return QaResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> QaResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/isaacus-dev/isaacus-python#with_streaming_response
+ """
+ return QaResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ model: Literal["kanon-answer-extractor", "kanon-answer-extractor-mini"],
+ query: str,
+ texts: List[str],
+ chunking_options: Optional[qa_create_params.ChunkingOptions] | NotGiven = NOT_GIVEN,
+ ignore_inextractability: bool | NotGiven = NOT_GIVEN,
+ top_k: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AnswerExtraction:
+ """
+ Extract answers to questions from legal documents with an Isaacus legal AI
+ answer extractor.
+
+ Args:
+ model: The ID of the [model](https://docs.isaacus.com/models#extractive-qa) to use for
+ extractive question answering.
+
+ query: The query to extract the answer to.
+
+ The query must contain at least one non-whitespace character.
+
+ Unlike the texts from which the answer will be extracted, the query cannot be so
+ long that it exceeds the maximum input length of the model.
+
+ texts: The texts to search for the answer in and extract the answer from.
+
+ There must be at least one text.
+
+ Each text must contain at least one non-whitespace character.
+
+ chunking_options: Options for how to split text into smaller chunks.
+
+ ignore_inextractability: Whether to, if the model's score of the likelihood that an answer cannot be
+ extracted from a text is greater than the highest score of all possible answers,
+ still return the highest scoring answers for that text.
+
+ If you have already determined that the texts answer the query, for example, by
+ using one of our classification or reranker models, then you should set this to
+ `true`.
+
+ top_k: The number of highest scoring answers to return.
+
+ If `null`, which is the default, all answers will be returned.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/extractions/qa",
+ body=maybe_transform(
+ {
+ "model": model,
+ "query": query,
+ "texts": texts,
+ "chunking_options": chunking_options,
+ "ignore_inextractability": ignore_inextractability,
+ "top_k": top_k,
+ },
+ qa_create_params.QaCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnswerExtraction,
+ )
+
+
+class AsyncQaResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncQaResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/isaacus-dev/isaacus-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncQaResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncQaResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/isaacus-dev/isaacus-python#with_streaming_response
+ """
+ return AsyncQaResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ model: Literal["kanon-answer-extractor", "kanon-answer-extractor-mini"],
+ query: str,
+ texts: List[str],
+ chunking_options: Optional[qa_create_params.ChunkingOptions] | NotGiven = NOT_GIVEN,
+ ignore_inextractability: bool | NotGiven = NOT_GIVEN,
+ top_k: int | NotGiven = NOT_GIVEN,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+ ) -> AnswerExtraction:
+ """
+ Extract answers to questions from legal documents with an Isaacus legal AI
+ answer extractor.
+
+ Args:
+ model: The ID of the [model](https://docs.isaacus.com/models#extractive-qa) to use for
+ extractive question answering.
+
+ query: The query to extract the answer to.
+
+ The query must contain at least one non-whitespace character.
+
+ Unlike the texts from which the answer will be extracted, the query cannot be so
+ long that it exceeds the maximum input length of the model.
+
+ texts: The texts to search for the answer in and extract the answer from.
+
+ There must be at least one text.
+
+ Each text must contain at least one non-whitespace character.
+
+ chunking_options: Options for how to split text into smaller chunks.
+
+ ignore_inextractability: Whether to, if the model's score of the likelihood that an answer cannot be
+ extracted from a text is greater than the highest score of all possible answers,
+ still return the highest scoring answers for that text.
+
+ If you have already determined that the texts answer the query, for example, by
+ using one of our classification or reranker models, then you should set this to
+ `true`.
+
+ top_k: The number of highest scoring answers to return.
+
+ If `null`, which is the default, all answers will be returned.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/extractions/qa",
+ body=await async_maybe_transform(
+ {
+ "model": model,
+ "query": query,
+ "texts": texts,
+ "chunking_options": chunking_options,
+ "ignore_inextractability": ignore_inextractability,
+ "top_k": top_k,
+ },
+ qa_create_params.QaCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=AnswerExtraction,
+ )
+
+
+class QaResourceWithRawResponse:
+ def __init__(self, qa: QaResource) -> None:
+ self._qa = qa
+
+ self.create = to_raw_response_wrapper(
+ qa.create,
+ )
+
+
+class AsyncQaResourceWithRawResponse:
+ def __init__(self, qa: AsyncQaResource) -> None:
+ self._qa = qa
+
+ self.create = async_to_raw_response_wrapper(
+ qa.create,
+ )
+
+
+class QaResourceWithStreamingResponse:
+ def __init__(self, qa: QaResource) -> None:
+ self._qa = qa
+
+ self.create = to_streamed_response_wrapper(
+ qa.create,
+ )
+
+
+class AsyncQaResourceWithStreamingResponse:
+ def __init__(self, qa: AsyncQaResource) -> None:
+ self._qa = qa
+
+ self.create = async_to_streamed_response_wrapper(
+ qa.create,
+ )
diff --git a/src/isaacus/resources/rerankings.py b/src/isaacus/resources/rerankings.py
index 35fd0a7..09561e2 100644
--- a/src/isaacus/resources/rerankings.py
+++ b/src/isaacus/resources/rerankings.py
@@ -80,7 +80,7 @@ def create(
There must be at least one text.
- The texts must contain at least one non-whitespace character.
+ Each text must contain at least one non-whitespace character.
chunking_options: Options for how to split text into smaller chunks.
@@ -191,7 +191,7 @@ async def create(
There must be at least one text.
- The texts must contain at least one non-whitespace character.
+ Each text must contain at least one non-whitespace character.
chunking_options: Options for how to split text into smaller chunks.
diff --git a/src/isaacus/types/classifications/universal_create_params.py b/src/isaacus/types/classifications/universal_create_params.py
index e699ac8..23ed39e 100644
--- a/src/isaacus/types/classifications/universal_create_params.py
+++ b/src/isaacus/types/classifications/universal_create_params.py
@@ -29,7 +29,7 @@ class UniversalCreateParams(TypedDict, total=False):
texts: Required[List[str]]
"""The texts to classify.
- The texts must contain at least one non-whitespace character.
+ Each text must contain at least one non-whitespace character.
"""
chunking_options: Optional[ChunkingOptions]
diff --git a/src/isaacus/types/extractions/__init__.py b/src/isaacus/types/extractions/__init__.py
new file mode 100644
index 0000000..ab167e2
--- /dev/null
+++ b/src/isaacus/types/extractions/__init__.py
@@ -0,0 +1,6 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .qa_create_params import QaCreateParams as QaCreateParams
+from .answer_extraction import AnswerExtraction as AnswerExtraction
diff --git a/src/isaacus/types/extractions/answer_extraction.py b/src/isaacus/types/extractions/answer_extraction.py
new file mode 100644
index 0000000..97e546a
--- /dev/null
+++ b/src/isaacus/types/extractions/answer_extraction.py
@@ -0,0 +1,71 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List
+
+from ..._models import BaseModel
+
+__all__ = ["AnswerExtraction", "Extraction", "ExtractionAnswer", "Usage"]
+
+
+class ExtractionAnswer(BaseModel):
+ end: int
+ """
+ The index of the character immediately after the last character of the answer in
+ the text, starting from `0` (such that, in Python, the answer is equivalent to
+ `text[start:end]`).
+ """
+
+ score: float
+ """
+ A score between `0` and `1`, inclusive, representing the strength of the answer.
+ """
+
+ start: int
+ """
+ The index of the first character of the answer in the text, starting from `0`
+ (and, therefore, ending at the number of characters in the text minus `1`).
+ """
+
+ text: str
+ """The text of the answer."""
+
+
+class Extraction(BaseModel):
+ answers: List[ExtractionAnswer]
+ """Answers extracted from the text, ordered from highest to lowest score."""
+
+ index: int
+ """
+ The index of the text in the input array of texts that this result represents,
+ starting from `0` (and, therefore, ending at the number of texts minus `1`).
+ """
+
+ inextractability_score: float
+ """
+ A score between `0` and `1`, inclusive, representing the likelihood that an
+ answer cannot be extracted from the text.
+
+ Where this score is greater than the highest score of all possible answers, the
+ text should be regarded as not having an extractable answer to the query. If
+ that is the case and `ignore_inextractability` is `false`, no answers will be
+ returned.
+ """
+
+
+class Usage(BaseModel):
+ input_tokens: int
+ """The number of tokens inputted to the model."""
+
+
+class AnswerExtraction(BaseModel):
+ extractions: List[Extraction]
+ """
+ The results of extracting answers from the texts, ordered from highest to lowest
+ inextractability score.
+ """
+
+ usage: Usage
+ """
+ Statistics about the usage of resources in the process of extracting answers
+ from the texts.
+ """
diff --git a/src/isaacus/types/extractions/qa_create_params.py b/src/isaacus/types/extractions/qa_create_params.py
new file mode 100644
index 0000000..07b3e79
--- /dev/null
+++ b/src/isaacus/types/extractions/qa_create_params.py
@@ -0,0 +1,64 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import List, Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["QaCreateParams", "ChunkingOptions"]
+
+
+class QaCreateParams(TypedDict, total=False):
+ model: Required[Literal["kanon-answer-extractor", "kanon-answer-extractor-mini"]]
+ """
+ The ID of the [model](https://docs.isaacus.com/models#extractive-qa) to use for
+ extractive question answering.
+ """
+
+ query: Required[str]
+ """The query to extract the answer to.
+
+ The query must contain at least one non-whitespace character.
+
+ Unlike the texts from which the answer will be extracted, the query cannot be so
+ long that it exceeds the maximum input length of the model.
+ """
+
+ texts: Required[List[str]]
+ """The texts to search for the answer in and extract the answer from.
+
+ There must be at least one text.
+
+ Each text must contain at least one non-whitespace character.
+ """
+
+ chunking_options: Optional[ChunkingOptions]
+ """Options for how to split text into smaller chunks."""
+
+ ignore_inextractability: bool
+ """
+ Whether to, if the model's score of the likelihood that an answer cannot be
+ extracted from a text is greater than the highest score of all possible answers,
+ still return the highest scoring answers for that text.
+
+ If you have already determined that the texts answer the query, for example, by
+ using one of our classification or reranker models, then you should set this to
+ `true`.
+ """
+
+ top_k: int
+ """The number of highest scoring answers to return.
+
+ If `null`, which is the default, all answers will be returned.
+ """
+
+
+class ChunkingOptions(TypedDict, total=False):
+ overlap_ratio: Optional[float]
+ """A number greater than or equal to 0 and less than 1."""
+
+ overlap_tokens: Optional[int]
+ """A whole number greater than -1."""
+
+ size: Optional[int]
+ """A whole number greater than or equal to 1."""
diff --git a/src/isaacus/types/reranking_create_params.py b/src/isaacus/types/reranking_create_params.py
index bd82314..4c62677 100644
--- a/src/isaacus/types/reranking_create_params.py
+++ b/src/isaacus/types/reranking_create_params.py
@@ -29,7 +29,7 @@ class RerankingCreateParams(TypedDict, total=False):
There must be at least one text.
- The texts must contain at least one non-whitespace character.
+ Each text must contain at least one non-whitespace character.
"""
chunking_options: Optional[ChunkingOptions]
diff --git a/tests/api_resources/extractions/__init__.py b/tests/api_resources/extractions/__init__.py
new file mode 100644
index 0000000..fd8019a
--- /dev/null
+++ b/tests/api_resources/extractions/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/extractions/test_qa.py b/tests/api_resources/extractions/test_qa.py
new file mode 100644
index 0000000..2a5727a
--- /dev/null
+++ b/tests/api_resources/extractions/test_qa.py
@@ -0,0 +1,152 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from isaacus import Isaacus, AsyncIsaacus
+from tests.utils import assert_matches_type
+from isaacus.types.extractions import AnswerExtraction
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestQa:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create(self, client: Isaacus) -> None:
+ qa = client.extractions.qa.create(
+ model="kanon-answer-extractor",
+ query="What is the punishment for murder in Victoria?",
+ texts=[
+ "The standard sentence for murder in the State of Victoria is 30 years if the person murdered was a police officer and 25 years in any other case."
+ ],
+ )
+ assert_matches_type(AnswerExtraction, qa, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_method_create_with_all_params(self, client: Isaacus) -> None:
+ qa = client.extractions.qa.create(
+ model="kanon-answer-extractor",
+ query="What is the punishment for murder in Victoria?",
+ texts=[
+ "The standard sentence for murder in the State of Victoria is 30 years if the person murdered was a police officer and 25 years in any other case."
+ ],
+ chunking_options={
+ "overlap_ratio": 0.1,
+ "overlap_tokens": 0,
+ "size": 512,
+ },
+ ignore_inextractability=False,
+ top_k=1,
+ )
+ assert_matches_type(AnswerExtraction, qa, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_raw_response_create(self, client: Isaacus) -> None:
+ response = client.extractions.qa.with_raw_response.create(
+ model="kanon-answer-extractor",
+ query="What is the punishment for murder in Victoria?",
+ texts=[
+ "The standard sentence for murder in the State of Victoria is 30 years if the person murdered was a police officer and 25 years in any other case."
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ qa = response.parse()
+ assert_matches_type(AnswerExtraction, qa, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ def test_streaming_response_create(self, client: Isaacus) -> None:
+ with client.extractions.qa.with_streaming_response.create(
+ model="kanon-answer-extractor",
+ query="What is the punishment for murder in Victoria?",
+ texts=[
+ "The standard sentence for murder in the State of Victoria is 30 years if the person murdered was a police officer and 25 years in any other case."
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ qa = response.parse()
+ assert_matches_type(AnswerExtraction, qa, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+
+class TestAsyncQa:
+ parametrize = pytest.mark.parametrize("async_client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create(self, async_client: AsyncIsaacus) -> None:
+ qa = await async_client.extractions.qa.create(
+ model="kanon-answer-extractor",
+ query="What is the punishment for murder in Victoria?",
+ texts=[
+ "The standard sentence for murder in the State of Victoria is 30 years if the person murdered was a police officer and 25 years in any other case."
+ ],
+ )
+ assert_matches_type(AnswerExtraction, qa, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncIsaacus) -> None:
+ qa = await async_client.extractions.qa.create(
+ model="kanon-answer-extractor",
+ query="What is the punishment for murder in Victoria?",
+ texts=[
+ "The standard sentence for murder in the State of Victoria is 30 years if the person murdered was a police officer and 25 years in any other case."
+ ],
+ chunking_options={
+ "overlap_ratio": 0.1,
+ "overlap_tokens": 0,
+ "size": 512,
+ },
+ ignore_inextractability=False,
+ top_k=1,
+ )
+ assert_matches_type(AnswerExtraction, qa, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncIsaacus) -> None:
+ response = await async_client.extractions.qa.with_raw_response.create(
+ model="kanon-answer-extractor",
+ query="What is the punishment for murder in Victoria?",
+ texts=[
+ "The standard sentence for murder in the State of Victoria is 30 years if the person murdered was a police officer and 25 years in any other case."
+ ],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ qa = await response.parse()
+ assert_matches_type(AnswerExtraction, qa, path=["response"])
+
+ @pytest.mark.skip()
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncIsaacus) -> None:
+ async with async_client.extractions.qa.with_streaming_response.create(
+ model="kanon-answer-extractor",
+ query="What is the punishment for murder in Victoria?",
+ texts=[
+ "The standard sentence for murder in the State of Victoria is 30 years if the person murdered was a police officer and 25 years in any other case."
+ ],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ qa = await response.parse()
+ assert_matches_type(AnswerExtraction, qa, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
From f29174f99d688d8d8e4b654f94fa3cb7ec5fb060 Mon Sep 17 00:00:00 2001
From: "stainless-app[bot]"
<142633134+stainless-app[bot]@users.noreply.github.com>
Date: Wed, 30 Apr 2025 08:14:01 +0000
Subject: [PATCH 10/10] release: 0.6.0
---
.release-please-manifest.json | 2 +-
CHANGELOG.md | 24 ++++++++++++++++++++++++
pyproject.toml | 2 +-
src/isaacus/_version.py | 2 +-
4 files changed, 27 insertions(+), 3 deletions(-)
diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index 2aca35a..4208b5c 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.5.0"
+ ".": "0.6.0"
}
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8e22a49..c6de71f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,29 @@
# Changelog
+## 0.6.0 (2025-04-30)
+
+Full Changelog: [v0.5.0...v0.6.0](https://github.com/isaacus-dev/isaacus-python/compare/v0.5.0...v0.6.0)
+
+### Features
+
+* **api:** introduced extractive QA ([7b9856c](https://github.com/isaacus-dev/isaacus-python/commit/7b9856c7a64fd4694d0fe8436934fa520faa38cc))
+
+
+### Bug Fixes
+
+* **pydantic v1:** more robust ModelField.annotation check ([40be0d5](https://github.com/isaacus-dev/isaacus-python/commit/40be0d5d7bb0c4d5187c0207e6470800e9827216))
+
+
+### Chores
+
+* broadly detect json family of content-type headers ([ef18419](https://github.com/isaacus-dev/isaacus-python/commit/ef18419dc26bba05aec8f5e29711bcc6fe329e9e))
+* **ci:** add timeout thresholds for CI jobs ([f0438ce](https://github.com/isaacus-dev/isaacus-python/commit/f0438cebcfc587af81d967e610dc33ea5a53bb32))
+* **ci:** only use depot for staging repos ([869c0ff](https://github.com/isaacus-dev/isaacus-python/commit/869c0ff5824ccfd63a4123a026530df11352db44))
+* **internal:** codegen related update ([8860ae0](https://github.com/isaacus-dev/isaacus-python/commit/8860ae0393429d660038ce1c8d15020a42141979))
+* **internal:** fix list file params ([6dc4e32](https://github.com/isaacus-dev/isaacus-python/commit/6dc4e32ab00e83d2307bfb729222f66f24a1f45f))
+* **internal:** import reformatting ([57473e2](https://github.com/isaacus-dev/isaacus-python/commit/57473e25e03b551ab85b4d2ec484defdcc2de09d))
+* **internal:** refactor retries to not use recursion ([513599c](https://github.com/isaacus-dev/isaacus-python/commit/513599ce261e2ec9a034715e20ec150025186255))
+
## 0.5.0 (2025-04-19)
Full Changelog: [v0.4.0...v0.5.0](https://github.com/isaacus-dev/isaacus-python/compare/v0.4.0...v0.5.0)
diff --git a/pyproject.toml b/pyproject.toml
index 7aafeb0..84be4f9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "isaacus"
-version = "0.5.0"
+version = "0.6.0"
description = "The official Python library for the isaacus API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/isaacus/_version.py b/src/isaacus/_version.py
index 3375585..86b8b84 100644
--- a/src/isaacus/_version.py
+++ b/src/isaacus/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "isaacus"
-__version__ = "0.5.0" # x-release-please-version
+__version__ = "0.6.0" # x-release-please-version