From fe25a6b0c77aae02dd46156daa4154619cd4ea2e Mon Sep 17 00:00:00 2001
From: Liam Thompson <32779855+leemthompo@users.noreply.github.com>
Date: Wed, 27 Nov 2024 11:40:54 +0100
Subject: [PATCH] Update "Download and deploy ELSER" snippet with adaptive
 allocations (#2878)

(cherry picked from commit 57401489d311436254246885568b826f34b5cd08)
---
 docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc b/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc
index 7181f463f..b3fb2779d 100644
--- a/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc
+++ b/docs/en/stack/ml/nlp/ml-nlp-elser.asciidoc
@@ -124,7 +124,11 @@ PUT _inference/sparse_embedding/my-elser-model
 {
   "service": "elasticsearch",
   "service_settings": {
-    "num_allocations": 1,
+    "adaptive_allocations": {
+      "enabled": true,
+      "min_number_of_allocations": 1,
+      "max_number_of_allocations": 10
+    },
     "num_threads": 1,
     "model_id": ".elser_model_2_linux-x86_64"
   }
@@ -132,6 +136,7 @@ PUT _inference/sparse_embedding/my-elser-model
 ----------------------------------
 --
 The API request automatically initiates the model download and then deploy the model.
+This example uses <<ml-nlp-auto-scale,autoscaling>> through adaptive allocation.
 Refer to the {ref}/infer-service-elser.html[ELSER {infer} service documentation]
 to learn more about the available settings.