From b9b63214b9c02873f74e25964944f033c493e7da Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Mon, 26 Jan 2026 16:11:13 -0800 Subject: [PATCH 01/22] AI-Dynamo updates. Signed-off-by: Kapil Arya --- conf/experimental/ai_dynamo/test/vllm.toml | 13 +- .../ai_dynamo/test_scenario/vllm_k8s.toml | 5 +- .../ai_dynamo/test_scenario/vllm_slurm.toml | 6 +- .../systems/kubernetes/kubernetes_system.py | 74 +- .../slurm/slurm_command_gen_strategy.py | 8 +- src/cloudai/workloads/ai_dynamo/__init__.py | 16 +- src/cloudai/workloads/ai_dynamo/ai_dynamo.py | 256 ++++++- src/cloudai/workloads/ai_dynamo/ai_dynamo.sh | 661 ++++++++++++------ .../ai_dynamo/calc_percentile_csv.py | 139 ++++ .../workloads/ai_dynamo/custom_workload.sh | 172 +++++ src/cloudai/workloads/ai_dynamo/genai_perf.sh | 106 +++ .../ai_dynamo/kubernetes_json_gen_strategy.py | 10 +- src/cloudai/workloads/ai_dynamo/lmbench.sh | 119 ++++ .../workloads/ai_dynamo/openai_chat_client.py | 220 ++++++ .../ai_dynamo/report_generation_strategy.py | 186 +---- .../ai_dynamo/slurm_command_gen_strategy.py | 127 ++-- tests/json_gen_strategy/test_ai_dynamo.py | 29 +- tests/ref_data/ai-dynamo.sbatch | 79 ++- ...st_ai_dynamo_report_generation_strategy.py | 83 +-- ...st_ai_dynamo_slurm_command_gen_strategy.py | 43 +- tests/test_acceptance.py | 20 +- tests/test_calc_percentile_csv.py | 92 +++ 22 files changed, 1864 insertions(+), 600 deletions(-) mode change 100755 => 100644 src/cloudai/workloads/ai_dynamo/ai_dynamo.sh create mode 100644 src/cloudai/workloads/ai_dynamo/calc_percentile_csv.py create mode 100644 src/cloudai/workloads/ai_dynamo/custom_workload.sh create mode 100644 src/cloudai/workloads/ai_dynamo/genai_perf.sh create mode 100644 src/cloudai/workloads/ai_dynamo/lmbench.sh create mode 100644 src/cloudai/workloads/ai_dynamo/openai_chat_client.py create mode 100644 tests/test_calc_percentile_csv.py diff --git a/conf/experimental/ai_dynamo/test/vllm.toml b/conf/experimental/ai_dynamo/test/vllm.toml index ddf132194..56db3654f 100644 --- a/conf/experimental/ai_dynamo/test/vllm.toml +++ b/conf/experimental/ai_dynamo/test/vllm.toml @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -# Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -29,7 +29,14 @@ docker_image_url = "nvcr.io/nvidia/ai-dynamo/vllm-runtime:0.7.0" decode-cmd = 'python3 -m dynamo.vllm' [cmd_args.dynamo.decode_worker] + num-nodes = 1 + + [cmd_args.dynamo.decode_worker.args] + model = "Qwen/Qwen3-0.6B" + gpu-memory-utilization = 0.95 + tensor-parallel-size = 8 pipeline-parallel-size = 1 + data-parallel-size = 1 [cmd_args.genai_perf] model = "Qwen/Qwen3-0.6B" @@ -46,6 +53,10 @@ docker_image_url = "nvcr.io/nvidia/ai-dynamo/vllm-runtime:0.7.0" concurrency = 2 extra-args = "--streaming -- -v --async" + [cmd_args.lmcache] + + [cmd_args.lmbench] + [extra_env_vars] UCX_LOG_LEVEL = "warn" UCX_TLS = "cuda_copy,rc_x" diff --git a/conf/experimental/ai_dynamo/test_scenario/vllm_k8s.toml b/conf/experimental/ai_dynamo/test_scenario/vllm_k8s.toml index 66a67db57..f557bac39 100644 --- a/conf/experimental/ai_dynamo/test_scenario/vllm_k8s.toml +++ b/conf/experimental/ai_dynamo/test_scenario/vllm_k8s.toml @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -# Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -24,7 +24,10 @@ test_name = "vLLM-Qwen3-0.6B" [Tests.cmd_args.dynamo] [Tests.cmd_args.dynamo.prefill_worker] num-nodes = 1 + [Tests.cmd_args.dynamo.prefill_worker.args] tensor-parallel-size = 8 + [Tests.cmd_args.dynamo.decode_worker] num-nodes = 1 + [Tests.cmd_args.dynamo.decode_worker.args] tensor-parallel-size = 8 diff --git a/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml b/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml index b32e93fe2..e1062c3bb 100644 --- a/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml +++ b/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -# Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,11 +25,13 @@ time_limit = "00:10:00" [Tests.cmd_args.dynamo.prefill_worker] num-nodes = 1 + [Tests.cmd_args.dynamo.prefill_worker.args] tensor-parallel-size = 4 pipeline-parallel-size = 1 [Tests.cmd_args.dynamo.decode_worker] num-nodes = 1 + [Tests.cmd_args.dynamo.decode_worker.args] tensor-parallel-size = 4 pipeline-parallel-size = 1 @@ -41,10 +43,12 @@ time_limit = "00:10:00" [Tests.cmd_args.dynamo.prefill_worker] num-nodes = 2 + [Tests.cmd_args.dynamo.prefill_worker.args] tensor-parallel-size = 4 pipeline-parallel-size = 1 [Tests.cmd_args.dynamo.decode_worker] num-nodes = 2 + [Tests.cmd_args.dynamo.decode_worker.args] tensor-parallel-size = 4 pipeline-parallel-size = 1 diff --git a/src/cloudai/systems/kubernetes/kubernetes_system.py b/src/cloudai/systems/kubernetes/kubernetes_system.py index a74bc1fc6..215e2b684 100644 --- a/src/cloudai/systems/kubernetes/kubernetes_system.py +++ b/src/cloudai/systems/kubernetes/kubernetes_system.py @@ -298,25 +298,75 @@ def _run_genai_perf(self, job: KubernetesJob) -> None: raise TypeError("Test definition must be an instance of AIDynamoTestDefinition") genai_perf_results_path = "/tmp/cloudai/genai-perf" + frontend_pod = self._get_dynamo_pod_by_role(role="frontend") - genai_perf_cmd = ["genai-perf", "profile", f"--artifact-dir={genai_perf_results_path}"] - for k, v in tdef.cmd_args.genai_perf.model_dump( - exclude={"extra_args", "extra-args"}, exclude_none=True - ).items(): - genai_perf_cmd.append(f"--{k}={v}") - if extra_args := tdef.cmd_args.genai_perf.extra_args: - genai_perf_cmd.extend(extra_args.split()) - logging.debug(f"GenAI perf arguments: {genai_perf_cmd=}") + # Copy wrapper script and calc_percentile_csv script to the pod + wrapper_script_path = tdef.genai_perf_script.installed_path + calc_csv_script_path = tdef.calc_percentile_csv.installed_path - frontend_pod = self._get_dynamo_pod_by_role(role="frontend") + pod_wrapper_path = "/tmp/genai_perf.sh" + pod_calc_csv_path = "/tmp/calc_percentile_csv.py" + + logging.debug(f"Copying wrapper script {wrapper_script_path} to pod {frontend_pod}") + cp_wrapper_cmd = f"kubectl cp {wrapper_script_path} {self.default_namespace}/{frontend_pod}:{pod_wrapper_path}" + subprocess.run(cp_wrapper_cmd, shell=True, capture_output=True, text=True, check=True) + + logging.debug(f"Copying calc_percentile_csv script {calc_csv_script_path} to pod {frontend_pod}") + cp_calc_cmd = f"kubectl cp 
{calc_csv_script_path} {self.default_namespace}/{frontend_pod}:{pod_calc_csv_path}" + subprocess.run(cp_calc_cmd, shell=True, capture_output=True, text=True, check=True) - logging.debug(f"Executing genai-perf in pod={frontend_pod} cmd={genai_perf_cmd}") + # Make wrapper script executable + chmod_cmd = ["chmod", "+x", pod_wrapper_path] + logging.debug(f"Making wrapper script executable in pod {frontend_pod}") + try: + lazy.k8s.stream.stream( + self.core_v1.connect_get_namespaced_pod_exec, + name=frontend_pod, + namespace=self.default_namespace, + command=chmod_cmd, + stderr=True, + stdin=False, + stdout=True, + tty=False, + ) + except lazy.k8s.client.ApiException as e: + logging.error(f"Error making wrapper script executable in pod '{frontend_pod}': {e}") + + # Build genai-perf command arguments + genai_perf_cmd_parts = ["genai-perf", "profile", f"--artifact-dir={genai_perf_results_path}"] + if tdef.cmd_args.genai_perf.args: + for k, v in tdef.cmd_args.genai_perf.args.model_dump(exclude_none=True).items(): + genai_perf_cmd_parts.append(f"--{k}={v}") + if extra_args := tdef.cmd_args.genai_perf.extra_args: + if isinstance(extra_args, str): + genai_perf_cmd_parts.extend(extra_args.split()) + else: + genai_perf_cmd_parts.extend(extra_args) + + # Build wrapper command with proper parameters + report_file = "genai_perf_report.csv" + wrapper_cmd = [ + "/bin/bash", + pod_wrapper_path, + "--result_dir", + genai_perf_results_path, + "--report_file", + report_file, + "--calc_percentile_csv_script", + pod_calc_csv_path, + "--gpus_per_node", + str(self.gpus_per_node), + "--", + *genai_perf_cmd_parts, + ] + + logging.debug(f"Executing genai-perf wrapper in pod={frontend_pod} cmd={wrapper_cmd}") try: genai_results = lazy.k8s.stream.stream( self.core_v1.connect_get_namespaced_pod_exec, name=frontend_pod, namespace=self.default_namespace, - command=genai_perf_cmd, + command=wrapper_cmd, stderr=True, stdin=False, stdout=True, @@ -326,7 +376,7 @@ def _run_genai_perf(self, job: KubernetesJob) -> None: with (job.test_run.output_path / "genai_perf.log").open("w") as f: f.write(genai_results) except lazy.k8s.client.ApiException as e: - logging.error(f"Error executing genai-perf command in pod '{frontend_pod}': {e}") + logging.error(f"Error executing genai-perf wrapper command in pod '{frontend_pod}': {e}") cp_logs_cmd = " ".join( [ diff --git a/src/cloudai/systems/slurm/slurm_command_gen_strategy.py b/src/cloudai/systems/slurm/slurm_command_gen_strategy.py index 65fae14f0..3893769a0 100644 --- a/src/cloudai/systems/slurm/slurm_command_gen_strategy.py +++ b/src/cloudai/systems/slurm/slurm_command_gen_strategy.py @@ -49,6 +49,8 @@ def __init__(self, system: System, test_run: TestRun) -> None: super().__init__(system, test_run) self.system = cast(SlurmSystem, system) self.test_run = test_run + self.container_install_path = "/cloudai_install" + self.container_results_path = "/cloudai_run_results" self._node_spec_cache: dict[str, tuple[int, list[str]]] = {} @@ -79,8 +81,8 @@ def container_mounts(self) -> list[str]: repo_mounts.append(f"{path}:{repo.container_mount}") mounts = [ - f"{self.test_run.output_path.absolute()}:/cloudai_run_results", - f"{self.system.install_path.absolute()}:/cloudai_install", + f"{self.test_run.output_path.absolute()}:{self.container_results_path}", + f"{self.system.install_path.absolute()}:{self.container_install_path}", f"{self.test_run.output_path.absolute()}", *tdef.extra_container_mounts, *repo_mounts, @@ -302,7 +304,7 @@ def _ranks_mapping_cmd(self) -> str: def _metadata_cmd(self) -> 
str: (self.test_run.output_path.absolute() / "metadata").mkdir(parents=True, exist_ok=True) num_nodes, _ = self.get_cached_nodes_spec() - metadata_script_path = "/cloudai_install" + metadata_script_path = self.container_install_path if not self.image_path(): metadata_script_path = str(self.system.install_path.absolute()) return " ".join( diff --git a/src/cloudai/workloads/ai_dynamo/__init__.py b/src/cloudai/workloads/ai_dynamo/__init__.py index 70ed6453c..fbdcaa747 100644 --- a/src/cloudai/workloads/ai_dynamo/__init__.py +++ b/src/cloudai/workloads/ai_dynamo/__init__.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -# Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,8 +19,13 @@ AIDynamoCmdArgs, AIDynamoTestDefinition, DecodeWorkerArgs, - GenAIPerfArgs, + GenAIPerf, + LMBench, + LMCache, + LMCacheArgs, PrefillWorkerArgs, + WorkerBaseArgs, + WorkerConfig, ) from .kubernetes_json_gen_strategy import AIDynamoKubernetesJsonGenStrategy from .report_generation_strategy import AIDynamoReportGenerationStrategy @@ -34,6 +39,11 @@ "AIDynamoSlurmCommandGenStrategy", "AIDynamoTestDefinition", "DecodeWorkerArgs", - "GenAIPerfArgs", + "GenAIPerf", + "LMBench", + "LMCache", + "LMCacheArgs", "PrefillWorkerArgs", + "WorkerBaseArgs", + "WorkerConfig", ] diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py index d7a585c0f..ed642f372 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -# Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -18,23 +18,66 @@ from pathlib import Path from typing import Optional -from pydantic import AliasChoices, BaseModel, ConfigDict, Field - -from cloudai.core import DockerImage, File, GitRepo, HFModel, Installable, JobStatusResult, TestRun +from pydantic import ( + AliasChoices, + BaseModel, + ConfigDict, + Field, + field_serializer, + model_validator, +) + +from cloudai.core import ( + DockerImage, + File, + GitRepo, + HFModel, + Installable, + JobStatusResult, + TestRun, +) from cloudai.models.workload import CmdArgs, TestDefinition from .report_generation_strategy import CSV_FILES_PATTERN, JSON_FILES_PATTERN -class WorkerBaseArgs(BaseModel): - """Base arguments for VLLM workers.""" +class Args(BaseModel): + """Arguments for custom workloads.""" model_config = ConfigDict(extra="allow", populate_by_name=True) - num_nodes: int | list[int] = Field( - default=1, serialization_alias="num-nodes", validation_alias=AliasChoices("num-nodes", "num_nodes") + +class Workload(BaseModel): + """Arguments for custom workloads.""" + + model_config = ConfigDict(extra="allow", populate_by_name=True) + + name: str + cmd: str + script: File + report_name: Optional[str] = Field(default=None, serialization_alias="report-name") + repo: Optional[GitRepo] = None + args: Optional[Args] = None + extra_args: str | list[str] | None = Field( + default=None, + serialization_alias="extra-args", + validation_alias=AliasChoices("extra-args", "extra_args"), ) - nodes: str | None = Field(default=None) + + @model_validator(mode="after") + def validate_workload(self) -> "Workload": + """Validate workload.""" + if self.report_name is None: + self.report_name = f"{self.name}_report.csv" + if self.args is None: + self.args = Args() + return self + + +class WorkerBaseArgs(Args): + """Base arguments for VLLM workers.""" + + model_config = ConfigDict(extra="allow", populate_by_name=True) data_parallel_size: int | list[int] | None = Field( default=None, @@ -56,6 +99,20 @@ class WorkerBaseArgs(BaseModel): serialization_alias="tensor-parallel-size", validation_alias=AliasChoices("tensor-parallel-size", "tensor_parallel_size"), ) + + +class WorkerConfig(BaseModel): + """Configuration for workers.""" + + model_config = ConfigDict(extra="allow", populate_by_name=True) + + num_nodes: int | list[int] = Field( + default=1, serialization_alias="num-nodes", validation_alias=AliasChoices("num-nodes", "num_nodes") + ) + nodes: str | None = Field(default=None) + + args: WorkerBaseArgs = Field(default_factory=WorkerBaseArgs) + extra_args: str | list[str] | None = Field( default=None, serialization_alias="extra-args", @@ -78,49 +135,130 @@ class DecodeWorkerArgs(WorkerBaseArgs): class AIDynamoArgs(BaseModel): """Arguments for AI Dynamo setup.""" - model_config = ConfigDict(extra="allow") + model_config = ConfigDict(extra="allow", populate_by_name=True) model: str = "Qwen/Qwen3-0.6B" backend: str = "vllm" + connector: Optional[str] = None # none, lmcache, kvbm workspace_path: str = Field( default="/workspace", serialization_alias="workspace-path", validation_alias=AliasChoices("workspace-path", "workspace_path"), ) - decode_worker: DecodeWorkerArgs = Field(default_factory=DecodeWorkerArgs) + decode_worker: WorkerConfig = Field(default_factory=WorkerConfig) decode_cmd: str = Field( default="python3 -m dynamo.vllm", serialization_alias="decode-cmd", validation_alias=AliasChoices("decode-cmd", "decode_cmd"), ) - prefill_worker: PrefillWorkerArgs 
| None = None + prefill_worker: WorkerConfig = Field(default_factory=WorkerConfig) prefill_cmd: str = Field( - default="python3 -m dynamo.vllm", + default="python3 -m dynamo.vllm --is-prefill-worker", serialization_alias="prefill-cmd", validation_alias=AliasChoices("prefill-cmd", "prefill_cmd"), ) -class GenAIPerfArgs(BaseModel): - """Arguments for GenAI performance profiling.""" +class LMCacheArgs(BaseModel): + """Arguments for LMCache.""" + + model_config = ConfigDict(extra="allow") + + chunk_size: int = 256 + local_cpu: bool = False + nixl_buffer_size: int = 10737418240 + nixl_buffer_device: str = "cuda" + extra_config_enable_nixl_storage: bool = True + extra_config_nixl_backend: str = "GDS_MT" + extra_config_nixl_file_pool_size: int = 64 + extra_config_nixl_path: str = "%CACHEDIR%" + + # LMCache controller configuration + enable_controller: bool = True + lmcache_instance_id: str = "lmcache_default_instance" + controller_url: str = "localhost:9001" + lmcache_worker_port: int = 8788 + distributed_url: str = "localhost:8789" + + +class LMCache(BaseModel): + """LMCache configuration.""" model_config = ConfigDict(extra="allow") - extra_args: str | None = Field( + controller_cmd: str = "lmcache_controller --host localhost --port 9000 --monitor-port 9001" + repo: Optional[GitRepo] = GitRepo( + url="git@github.com:LMCache/LMCache.git", + commit="ab8530993992db873869ba882320953582d94309", + mount_as="/git/LMCache", + ) + + args: LMCacheArgs = Field(default_factory=LMCacheArgs) + extra_args: str | list[str] | None = Field( default=None, serialization_alias="extra-args", validation_alias=AliasChoices("extra-args", "extra_args"), ) +class GenAIPerf(Workload): + """Workload configuration for GenAI performance profiling.""" + + model_config = ConfigDict(extra="allow") + + name: str = "genai_perf" + cmd: str = "genai-perf profile" + script: File = File(Path(__file__).parent.parent / "ai_dynamo/genai_perf.sh") + + +class LMBench(Workload): + """Workload configuration for LMBench.""" + + model_config = ConfigDict(extra="allow") + + name: str = "lmbench" + script: File = File(Path(__file__).parent.parent / "ai_dynamo/lmbench.sh") + cmd: str = "python3 ./synthetic-multi-round-qa/multi-round-qa.py" + qps: str | list[str] | None = "0.25,0.5,0.75,1.0,1.25,1.5,1.75,2.0" + repo: Optional[GitRepo] = GitRepo( + url="git@github.com:LMCache/LMBenchmark.git", + commit="e1406623c5e88878cf2b7fbd64fe6c47f7dcb66f", + mount_as="/git/LMBenchmark", + ) + + +class CustomWorkload(Workload): + """Generic workload script.""" + + model_config = ConfigDict(extra="allow") + + name: str = "custom_workload" + cmd: str = "hostname" + script: File = File(Path(__file__).parent.parent / "ai_dynamo/custom_workload.sh") + + +class Constraints(BaseModel): + """Constraints for validation of AI Dynamo configurations when using DSE.""" + + model_config = ConfigDict(extra="allow") + + prefill_tp_le_decode_tp: bool = True + tp_times_pp_le_gpus_per_node: bool = True + prefill_decode_nodes_le_total_nodes: bool = True + + class AIDynamoCmdArgs(CmdArgs): """Arguments for AI Dynamo.""" docker_image_url: str - huggingface_home_container_path: Path = Path("/root/.cache/huggingface") + storage_cache_dir: Optional[str] = None + num_nodes: int = 1 + gpus_per_node: int = 8 dynamo: AIDynamoArgs - genai_perf: GenAIPerfArgs - run_script: str = "" + lmcache: LMCache = Field(default_factory=LMCache) + genai_perf: GenAIPerf = Field(default_factory=GenAIPerf) + lmbench: LMBench = Field(default_factory=LMBench) + custom_workload: CustomWorkload = 
Field(default_factory=CustomWorkload) class AIDynamoTestDefinition(TestDefinition): @@ -129,10 +267,40 @@ class AIDynamoTestDefinition(TestDefinition): cmd_args: AIDynamoCmdArgs _docker_image: Optional[DockerImage] = None script: File = File(Path(__file__).parent.parent / "ai_dynamo/ai_dynamo.sh") + genai_perf_script: File = File(Path(__file__).parent.parent / "ai_dynamo/genai_perf.sh") + calc_percentile_csv: File = File(Path(__file__).parent.parent / "ai_dynamo/calc_percentile_csv.py") dynamo_repo: GitRepo = GitRepo( - url="https://github.com/ai-dynamo/dynamo.git", commit="f7e468c7e8ff0d1426db987564e60572167e8464" + url="https://github.com/ai-dynamo/dynamo.git", + commit="f7e468c7e8ff0d1426db987564e60572167e8464", + mount_as="/git/dynamo", ) _hf_model: HFModel | None = None + workloads: str = "genai_perf.sh,lmbench.sh,custom_workload.sh" + constraints: Constraints = Constraints() + + @model_validator(mode="after") + def validate_test_definition(self) -> "AIDynamoTestDefinition": + """Validate test definition.""" + # Populate git_repos list with all git repositories used by this test definition. + self.git_repos = [self.dynamo_repo] + if self.cmd_args.lmcache.repo: + self.git_repos.append(self.cmd_args.lmcache.repo) + if self.cmd_args.lmbench.repo: + self.git_repos.append(self.cmd_args.lmbench.repo) + if self.cmd_args.custom_workload.repo: + self.git_repos.append(self.cmd_args.custom_workload.repo) + + # Validate benchmark names + workloads = self.workloads.split(",") + for workload in workloads: + if workload not in [ + self.cmd_args.genai_perf.script.src.name, + self.cmd_args.lmbench.script.src.name, + self.cmd_args.custom_workload.script.src.name, + ]: + raise ValueError(f"Invalid workload script: {workload}") + + return self @property def docker_image(self) -> DockerImage: @@ -148,7 +316,20 @@ def hf_model(self) -> HFModel: @property def installables(self) -> list[Installable]: - return [self.docker_image, self.script, self.dynamo_repo, self.hf_model] + """Get all installables for this test definition.""" + result = [ + self.docker_image, + self.script, + self.hf_model, + self.genai_perf_script, + self.calc_percentile_csv, + self.cmd_args.lmbench.script, + self.cmd_args.custom_workload.script, + File(Path(__file__).parent.parent / "ai_dynamo/openai_chat_client.py"), + *self.git_repos, + ] + + return result def was_run_successful(self, tr: TestRun) -> JobStatusResult: output_path = tr.output_path @@ -159,3 +340,36 @@ def was_run_successful(self, tr: TestRun) -> JobStatusResult: if not has_results: return JobStatusResult(False, "No result files found in the output directory.") return JobStatusResult(True) + + def constraint_check(self, tr: TestRun) -> bool: + prefill_worker = tr.test.cmd_args.dynamo.prefill_worker + decode_worker = tr.test.cmd_args.dynamo.decode_worker + + prefill_tp = prefill_worker.args.tensor_parallel_size if prefill_worker else 1 + decode_tp = decode_worker.args.tensor_parallel_size if decode_worker else 1 + prefill_pp = prefill_worker.args.pipeline_parallel_size if prefill_worker else 1 + decode_pp = decode_worker.args.pipeline_parallel_size if decode_worker else 1 + prefill_nodes = prefill_worker.num_nodes if prefill_worker else 0 + decode_nodes = decode_worker.num_nodes if decode_worker else 1 + + if self.constraints.prefill_tp_le_decode_tp and prefill_tp > decode_tp: + logging.info("constraint_check failed for: prefill_tp_le_decode_tp") + return False + logging.info("constraint_check passed for: prefill_tp_le_decode_tp") + + gpus_per_node = 
tr.test.cmd_args.gpus_per_node + if self.constraints.tp_times_pp_le_gpus_per_node and ( + prefill_tp * prefill_pp > gpus_per_node or decode_tp * decode_pp > gpus_per_node + ): + logging.info("constraint_check failed for: tp_times_pp_le_gpus_per_node") + return False + logging.info("constraint_check passed for: tp_times_pp_le_gpus_per_node") + + num_nodes = tr.test.cmd_args.num_nodes + nodes_check = self.constraints.prefill_decode_nodes_le_total_nodes + if nodes_check and prefill_nodes + decode_nodes > num_nodes: + logging.info("constraint_check failed for: prefill_decode_nodes_le_total_nodes") + return False + logging.info("constraint_check passed for: prefill_decode_nodes_le_total_nodes") + + return True diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh old mode 100755 new mode 100644 index 51e0c8e84..dafc0592e --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh @@ -2,68 +2,74 @@ # CloudAI params RESULTS_DIR="/cloudai_run_results" +INSTALL_DIR="/cloudai_install" HUGGINGFACE_HOME="/root/.cache/huggingface" -DONE_MARKER="frontend_done.marker" -FATAL_ERROR_MARKER="fatal_error.marker" -: "${DYNAMO_WORKER_ERROR_PATTERN:=zmq\.error\.ZMQError:.*Address already in use|UCX.*ERROR|ERROR core\.run_engine_core:.*EngineCore failed to start|ERROR multiproc_executor\.worker_busy_loop:.*WorkerProc hit an exception|EngineDeadError|EngineCore encountered an issue}" +DONE_MARKER="dynamo_frontend_done.marker" +FATAL_ERROR_MARKER="dynamo_fatal_error.marker" NODE_ROLES_FILE="node_roles.log" +TEST_USER="$USER" export DYN_SDK_DISABLE_ANSI_LOGGING=1 export VLLM_DISABLE_COLORED_OUTPUT=1 export VLLM_NO_COLOR=1 +export VLLM_LOGGING_COLOR=0 +#export VLLM_LOGGING_CONFIG_PATH="/cloudai_install/vllm_logging_config.json" + export ABSL_LOGGING_USE_COLOR=0 export DYN_LOGGING_DISABLE_ANSI_COLORS=1 export TERM=dumb export NO_COLOR=1 +export TQDM_DISABLE=1 # Disables tqdm progress bars globally +export TQDM_MININTERVAL=999999 # Makes tqdm update very rarely export DEBIAN_FRONTEND=noninteractive export APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1 +declare -A prefill_config declare -A prefill_args +declare -A decode_config declare -A decode_args +declare -A lmcache_args +declare -A lmcache_config declare -A genai_perf_args +declare -A genai_perf_config +declare -A lmbench_args +declare -A lmbench_config +declare -A custom_workload_args +declare -A custom_workload_config declare -A dynamo_args dynamo_args["backend"]="vllm" dynamo_args["node-setup-cmd"]="" -dynamo_args["prefill-cmd"]="python3 -m dynamo.vllm --is-prefill-worker" -dynamo_args["decode-cmd"]="python3 -m dynamo.vllm" dynamo_args["ingress-cmd"]="python -m dynamo.frontend --router-mode kv" dynamo_args["port"]=$((8080 + SLURM_JOBID % 100)) dynamo_args["endpoint"]="v1/chat/completions" -dynamo_args["model"]="deepseek-ai/DeepSeek-R1-Distill-Llama-8B" +dynamo_args["model"]="Qwen/Qwen3-0.6B" +dynamo_args["connector"]="none" dynamo_args["etcd-port"]=2379 dynamo_args["nats-port"]=4222 dynamo_args["workspace-path"]="/workspace" dynamo_args["frontend-node"]="" -dynamo_args["num-prefill-nodes"]=1 -dynamo_args["num-decode-nodes"]=1 -dynamo_args["prefill-nodes"]="" -dynamo_args["decode-nodes"]="" -dynamo_args["tp-arg-name"]="tensor-parallel-size" -dynamo_args["pp-arg-name"]="pipeline-parallel-size" -dynamo_args["multiple-prefill-workers-per-node"]="true" -dynamo_args["multiple-decode-workers-per-node"]="true" -dynamo_args["prefill-initialized-regex"]="Worker.*has.been.initialized" 
-dynamo_args["decode-initialized-regex"]="Worker.*has.been.initialized" dynamo_args["etcd-cmd"]="etcd --log-level debug" dynamo_args["nats-cmd"]="nats-server -js" -dynamo_args["genai-perf-cmd"]="genai-perf profile" +dynamo_args["worker-error-pattern"]="zmq.error.ZMQError:.Address.already.in.use|UCX.*ERROR|ERROR.core.run_engine_core:.EngineCore.failed.to.start|ERROR.multiproc_executor.worker_busy_loop:.WorkerProc.hit.an.exception|EngineDeadError|EngineCore.encountered.an.issue" # sglang-specific optional ports. Ignored by vllm. dynamo_args["sgl-http-port"]=9001 dynamo_args["prefill-port"]=30011 dynamo_args["decode-port"]=30021 -# GenAI Perf params -GENAI_PERF_PROFILE_EXPORT_FILE="profile.json" -GENAI_PERF_ARTIFACT_DIR="genai_perf_artifacts" function log() { - echo "[$(date --iso-8601=ns) $(hostname)]: $@" + echo -e "[$(date +%F\ %T) $(hostname)]: $*" +} + +function min() +{ + echo "$(( $1 < $2 ? $1 : $2 ))" } _is_vllm() { [[ "${dynamo_args["backend"]}" == "vllm" ]]; } @@ -85,29 +91,6 @@ _csv_index_of() { echo "-1" } -_validate_or_build_nodelists() { - local dl_len=$(_csv_len "${dynamo_args["decode-nodes"]}") - local pl_len=$(_csv_len "${dynamo_args["prefill-nodes"]}") - if (( dl_len > 0 )); then dynamo_args["num-decode-nodes"]="$dl_len"; fi - if (( pl_len > 0 )); then dynamo_args["num-prefill-nodes"]="$pl_len"; fi - - if [[ -z "${dynamo_args["decode-nodes"]}" || -z "${dynamo_args["prefill-nodes"]}" ]]; then - if [[ -z "${DYNAMO_NODELIST:-}" ]]; then - log "ERROR: Provide --dynamo-decode-nodes/--dynamo-prefill-nodes or set DYNAMO_NODELIST"; exit 1 - fi - local d="${dynamo_args["num-decode-nodes"]}" - local p="${dynamo_args["num-prefill-nodes"]}" - local total=$(_csv_len "${DYNAMO_NODELIST}") - if (( total < d + p )); then - log "ERROR: DYNAMO_NODELIST has ${total} entries; need decode(${d})+prefill(${p})"; exit 1 - fi - [[ -z "${dynamo_args["decode-nodes"]}" ]] && \ - dynamo_args["decode-nodes"]=$(echo "$DYNAMO_NODELIST" | cut -d',' -f1-"$d") - [[ -z "${dynamo_args["prefill-nodes"]}" ]] && \ - dynamo_args["prefill-nodes"]=$(echo "$DYNAMO_NODELIST" | cut -d',' -f$(( d + 1 ))-) - fi -} - _gpus_per_node() { local n=$(echo "${CUDA_VISIBLE_DEVICES:-}" | tr ',' '\n' | grep -c . 
|| true) [[ "$n" -gt 0 ]] && echo "$n" || echo "1" @@ -125,23 +108,14 @@ _resolve_host_ip() { } _apply_sglang_section_args() { - prefill_args["--port"]=${dynamo_args["prefill-port"]} - decode_args["--port"]=${dynamo_args["decode-port"]} - prefill_args["--served-model-name"]=${dynamo_args["model"]} - decode_args["--served-model-name"]=${dynamo_args["model"]} - - # model-path must point to HF cache for sglang - prefill_args["--model-path"]="${HUGGINGFACE_HOME}" - decode_args["--model-path"]="${HUGGINGFACE_HOME}" - local self="$(_current_node_name)" local gpn="$(_gpus_per_node)" # prefill group - local prefill_nodes="${dynamo_args["num-prefill-nodes"]}" - local prefill_master_host="$(_first_in_csv "${dynamo_args["prefill-nodes"]}")" + local prefill_nodes="${prefill_config["num-nodes"]}" + local prefill_master_host="$(_first_in_csv "${prefill_config["node-list"]}")" local prefill_master_ip="$(_resolve_host_ip "${prefill_master_host}")" - local prefill_rank="$(_csv_index_of "${dynamo_args["prefill-nodes"]}" "$self")" + local prefill_rank="$(_csv_index_of "${prefill_config["node-list"]}" "$self")" local prefill_total_gpus=$(( gpn * prefill_nodes )) prefill_args["--dist-init-addr"]="${prefill_master_ip}:${dynamo_args["prefill-port"]}" prefill_args["--nnodes"]="${prefill_nodes}" @@ -150,10 +124,10 @@ _apply_sglang_section_args() { prefill_args["--dp-size"]="${prefill_args["--dp-size"]:-${prefill_total_gpus}}" # decode group - local decode_nodes="${dynamo_args["num-decode-nodes"]}" - local decode_master_host="$(_first_in_csv "${dynamo_args["decode-nodes"]}")" + local decode_nodes="${decode_config["num-nodes"]}" + local decode_master_host="$(_first_in_csv "${decode_config["node-list"]}")" local decode_master_ip="$(_resolve_host_ip "${decode_master_host}")" - local decode_rank="$(_csv_index_of "${dynamo_args["decode-nodes"]}" "$self")" + local decode_rank="$(_csv_index_of "${decode_config["node-list"]}" "$self")" local decode_total_gpus=$(( gpn * decode_nodes )) decode_args["--dist-init-addr"]="${decode_master_ip}:${dynamo_args["decode-port"]}" decode_args["--nnodes"]="${decode_nodes}" @@ -171,14 +145,6 @@ _apply_sglang_section_args() { unset 'decode_args["--model"]' } -_apply_genai_perf_section_args() { - genai_perf_args["--model"]="${dynamo_args["model"]}" - genai_perf_args["--url"]="${dynamo_args["url"]}" - genai_perf_args["--endpoint"]="${dynamo_args["endpoint"]}" - genai_perf_args["--artifact-dir"]="${RESULTS_DIR}/${GENAI_PERF_ARTIFACT_DIR}/" - genai_perf_args["--profile-export-file"]="${GENAI_PERF_PROFILE_EXPORT_FILE}" -} - _parse_cli_pairs() { log "Parsing args:" while [[ $# -ge 2 ]]; do @@ -187,21 +153,102 @@ _parse_cli_pairs() { case $key in --dynamo-*) dynamo_args["${key#--dynamo-}"]="$2" ;; + --prefill-args-*) + prefill_args["--${key#--prefill-args-}"]="$2" ;; --prefill-*) - prefill_args["--${key#--prefill-}"]="$2" ;; + prefill_config["${key#--prefill-}"]="$2" ;; + --decode-args-*) + decode_args["--${key#--decode-args-}"]="$2" ;; --decode-*) - decode_args["--${key#--decode-}"]="$2" ;; - --genai-perf-*) - genai_perf_args["--${key#--genai-perf-}"]="$2" ;; + decode_config["${key#--decode-}"]="$2" ;; + --lmcache-args-*) + lmcache_args["${key#--lmcache-args-}"]="$2" ;; + --lmcache-*) + lmcache_config["${key#--lmcache-}"]="$2" ;; + --lmbench-args-*) + lmbench_args["--${key#--lmbench-args-}"]="$2" ;; + --lmbench-*) + lmbench_config["--${key#--lmbench-}"]="$2" ;; + --genai_perf-args-*) + genai_perf_args["--${key#--genai_perf-args-}"]="$2" ;; + --genai_perf-*) + 
genai_perf_config["--${key#--genai_perf-}"]="$2" ;; + --custom_workload-args-*) + custom_workload_args["--${key#--custom_workload-args-}"]="$2" ;; + --custom_workload-*) + custom_workload_config["--${key#--custom_workload-}"]="$2" ;; --huggingface-home) HUGGINGFACE_HOME="$2" ;; --results-dir) RESULTS_DIR="$2" ;; + --install-dir) + INSTALL_DIR="$2" ;; + --user) + TEST_USER="$2" ;; esac shift; shift; done } +_populate_nodelist() { + local num_nodes="$1" + local exclude_nodelist="$2" + + # Handle zero nodes case + if [[ -z "$num_nodes" || "$num_nodes" -eq 0 ]]; then + echo "" + return + fi + + local count=0 + local nodelist="" + for node in $(echo $DYNAMO_NODELIST | cut -d',' -f1-${num_nodes}); do + if ! echo "${exclude_nodelist}" | grep -q "$node"; then + nodelist+="$node," + count=$(( count + 1 )) + if [[ "$count" -eq "${num_nodes}" ]]; then + break + fi + fi + done + + # Terminate trailing comma + nodelist=${nodelist%,} + echo "$nodelist" +} + +_set_nodelists() +{ + if [[ -z "${DYNAMO_NODELIST:-}" ]]; then + log "ERROR: DYNAMO_NODELIST is not set" + exit 1 + fi + + if [[ -z "${decode_config["node-list"]}" ]]; then + decode_config["node-list"]=$(_populate_nodelist "${decode_config["num-nodes"]}" "") + fi + + if [[ -z "${prefill_config["node-list"]}" ]]; then + prefill_config["node-list"]=$(_populate_nodelist "${prefill_config["num-nodes"]}" "${decode_config["node-list"]}") + fi + + # Prefill nodelist should match prefill node count (skip validation if num-nodes is 0) + local prefill_num_nodes="${prefill_config["num-nodes"]:-0}" + if [[ "$prefill_num_nodes" -gt 0 ]]; then + local prefill_nodelist_count=$(_csv_len "${prefill_config["node-list"]}") + if [[ "${prefill_nodelist_count}" -ne "${prefill_num_nodes}" ]]; then + log "ERROR: number of nodes in prefill nodelist (${prefill_nodelist_count}) does not match prefill node count (${prefill_num_nodes})" + exit 1 + fi + fi + + local decode_nodelist_count=$(_csv_len "${decode_config["node-list"]}") + if [[ "${decode_nodelist_count}" -ne "${decode_config["num-nodes"]}" ]]; then + log "ERROR: number of nodes in decode nodelist (${decode_nodelist_count}) does not match decode node count (${decode_config["num-nodes"]})" + exit 1 + fi +} + _set_backend_defaults() { case "${dynamo_args["backend"]}" in vllm) @@ -219,50 +266,42 @@ _set_backend_defaults() { esac } -_sync_num_nodes_from_section_args() { - if [[ -n "${prefill_args["--num-nodes"]:-}" ]]; then - dynamo_args["num-prefill-nodes"]="${prefill_args["--num-nodes"]}" - fi - if [[ -n "${decode_args["--num-nodes"]:-}" ]]; then - dynamo_args["num-decode-nodes"]="${decode_args["--num-nodes"]}" +_apply_connector_settings() { + local connector="${dynamo_args["connector"]:-}" + if [[ -z "$connector" || "$connector" == "none" ]]; then + ENABLE_LMCACHE="${ENABLE_LMCACHE:-0}" + ENABLE_KVBM="${ENABLE_KVBM:-0}" + return fi + + case "$connector" in + lmcache) + ENABLE_LMCACHE=1 + ENABLE_KVBM=0 + ;; + kvbm) + ENABLE_LMCACHE=0 + ENABLE_KVBM=1 + ;; + *) + log "ERROR: Unknown connector '${connector}' (expected none|lmcache|kvbm)" + exit 1 + ;; + esac } _patch_dynamo_args() { - if [[ -z "${dynamo_args["decode-nodes"]}" ]]; then - if [[ -n "${decode_args["--node-list"]}" ]]; then - dynamo_args["decode-nodes"]="${decode_args["--node-list"]}" - else - dynamo_args["decode-nodes"]=$(echo $DYNAMO_NODELIST | cut -d',' -f1-${dynamo_args["num-decode-nodes"]}) - fi - fi - - if [[ -z "${dynamo_args["prefill-nodes"]}" ]]; then - if [[ -n "${prefill_args["--node-list"]}" ]]; then - 
dynamo_args["prefill-nodes"]="${prefill_args["--node-list"]}" - else - dynamo_args["prefill-nodes"]=$(echo $DYNAMO_NODELIST | cut -d',' -f$(( ${dynamo_args["num-decode-nodes"]} + 1 ))-) - fi - fi - if [[ -z "${dynamo_args["frontend-node"]}" ]]; then - dynamo_args["frontend-node"]=$(echo ${dynamo_args["decode-nodes"]} | cut -d',' -f1) + dynamo_args["frontend-node"]=$(echo ${decode_config["node-list"]} | cut -d',' -f1) fi dynamo_args["url"]="http://${dynamo_args["frontend-node"]}:${dynamo_args["port"]}" - - _validate_or_build_nodelists } _patch_section_args() { - prefill_args["--model"]="${dynamo_args["model"]}" - decode_args["--model"]="${dynamo_args["model"]}" - if _is_sglang; then _apply_sglang_section_args fi - - _apply_genai_perf_section_args } _compute_worker_allocation_sglang() { @@ -273,22 +312,13 @@ _compute_worker_allocation_sglang() { fi # sglang: one worker per node using all GPUs - dynamo_args["prefill-gpus-per-worker"]=$num_gpus - dynamo_args["decode-gpus-per-worker"]=$num_gpus - dynamo_args["prefill-workers-per-node"]=1 - dynamo_args["decode-workers-per-node"]=1 - - if [[ -n "${prefill_args["--num-nodes"]}" ]]; then - dynamo_args["num-prefill-nodes"]=${prefill_args["--num-nodes"]} - fi - if [[ -n "${decode_args["--num-nodes"]}" ]]; then - dynamo_args["num-decode-nodes"]=${decode_args["--num-nodes"]} - fi + prefill_config["gpus-per-worker"]=$num_gpus + decode_config["gpus-per-worker"]=$num_gpus + prefill_config["workers-per-node"]=1 + decode_config["workers-per-node"]=1 } _compute_worker_allocation_vllm() { - local tp_arg_name="--${dynamo_args["tp-arg-name"]}" - local pp_arg_name="--${dynamo_args["pp-arg-name"]}" local num_gpus="$(_gpus_per_node)" if [[ $num_gpus -eq 0 ]]; then @@ -296,37 +326,31 @@ _compute_worker_allocation_vllm() { exit 1 fi - dynamo_args["prefill-gpus-per-worker"]=$(( prefill_args[$tp_arg_name] * prefill_args[$pp_arg_name] )) - dynamo_args["decode-gpus-per-worker"]=$(( decode_args[$tp_arg_name] * decode_args[$pp_arg_name] )) + prefill_config["gpus-per-worker"]=$(( prefill_args["--tensor-parallel-size"] * prefill_args["--pipeline-parallel-size"] )) + decode_config["gpus-per-worker"]=$(( decode_args["--tensor-parallel-size"] * decode_args["--pipeline-parallel-size"] )) - if [[ ${dynamo_args["prefill-gpus-per-worker"]} -eq 0 ]] || [[ ${dynamo_args["decode-gpus-per-worker"]} -eq 0 ]]; then + if [[ ${prefill_config["gpus-per-worker"]} -eq 0 ]] || [[ ${decode_config["gpus-per-worker"]} -eq 0 ]]; then log "ERROR: Invalid TP/PP configuration" exit 1 fi - if [[ "${dynamo_args["multiple-prefill-workers-per-node"]}" != "true" ]]; then - dynamo_args["prefill-gpus-per-worker"]=$num_gpus + if [[ "${prefill_config["multiple-workers-per-node"]}" != "true" ]]; then + prefill_config["gpus-per-worker"]=$num_gpus fi - if [[ "${dynamo_args["multiple-decode-workers-per-node"]}" != "true" ]]; then - dynamo_args["decode-gpus-per-worker"]=$num_gpus + if [[ "${decode_config["multiple-workers-per-node"]}" != "true" ]]; then + decode_config["gpus-per-worker"]=$num_gpus fi - log "DECODE: num GPUs: $num_gpus, GPUs per worker: ${dynamo_args["decode-gpus-per-worker"]}" - log "PREFILL: num GPUs: $num_gpus, GPUs per worker: ${dynamo_args["prefill-gpus-per-worker"]}" - dynamo_args["prefill-workers-per-node"]=$(( num_gpus / dynamo_args["prefill-gpus-per-worker"] )) - dynamo_args["decode-workers-per-node"]=$(( num_gpus / dynamo_args["decode-gpus-per-worker"] )) - log "DECODE: workers per node: ${dynamo_args["decode-workers-per-node"]}" - log "PREFILL: workers per node: 
${dynamo_args["prefill-workers-per-node"]}" + log "DECODE: num GPUs: $num_gpus, GPUs per worker: ${decode_config["gpus-per-worker"]}" + log "PREFILL: num GPUs: $num_gpus, GPUs per worker: ${prefill_config["gpus-per-worker"]}" + prefill_config["workers-per-node"]=$(( num_gpus / prefill_config["gpus-per-worker"] )) + decode_config["workers-per-node"]=$(( num_gpus / decode_config["gpus-per-worker"] )) + log "DECODE: workers per node: ${decode_config["workers-per-node"]}" + log "PREFILL: workers per node: ${prefill_config["workers-per-node"]}" - if [[ -n "${prefill_args["--num-nodes"]}" ]]; then - dynamo_args["num-prefill-nodes"]=${prefill_args["--num-nodes"]} - fi - if [[ -n "${decode_args["--num-nodes"]}" ]]; then - dynamo_args["num-decode-nodes"]=${decode_args["--num-nodes"]} - fi - log "NUM PREFILL NODES: ${dynamo_args["num-prefill-nodes"]}" - log "NUM DECODE NODES: ${dynamo_args["num-decode-nodes"]}" + log "NUM PREFILL NODES: ${prefill_config["num-nodes"]}" + log "NUM DECODE NODES: ${decode_config["num-nodes"]}" } _compute_worker_allocation() { @@ -337,35 +361,71 @@ _compute_worker_allocation() { fi } +arg_array_to_string() +{ + local -n arr=$1 + local result="" + for key in "${!arr[@]}"; do + result+=" ${key} ${arr[$key]}\n" + done + echo -e "$result" +} + _dump_args() { - log "Dynamo args: $(for key in "${!dynamo_args[@]}"; do echo -n "$key: ${dynamo_args[$key]}; "; done)" - log "Prefill args: $(for key in "${!prefill_args[@]}"; do echo -n "$key: ${prefill_args[$key]}; "; done)" - log "Decode args: $(for key in "${!decode_args[@]}"; do echo -n "$key: ${decode_args[$key]}; " ; done)" - log "GenAI perf args: $(for key in "${!genai_perf_args[@]}"; do echo -n "$key: ${genai_perf_args[$key]}; "; done)" + log "Dynamo args:\n$(arg_array_to_string dynamo_args)" + log "Prefill config params:\n$(arg_array_to_string prefill_config)" + log "Prefill args:\n$(arg_array_to_string prefill_args)" + log "Decode config params:\n$(arg_array_to_string decode_config)" + log "Decode args:\n$(arg_array_to_string decode_args)" + log "LMCache config params:\n$(arg_array_to_string lmcache_config)" + log "LMCache args:\n$(arg_array_to_string lmcache_args)" + log "GenAI config params:\n$(arg_array_to_string genai_perf_config)" + log "GenAI perf args:\n$(arg_array_to_string genai_perf_args)" + log "LMBench config params:\n$(arg_array_to_string lmbench_config)" + log "LMBench args:\n$(arg_array_to_string lmbench_args)" + log "Custom workload config params:\n$(arg_array_to_string custom_workload_config)" + log "Custom workload args:\n$(arg_array_to_string custom_workload_args)" + log "--------------------------------" } function parse_args() { _parse_cli_pairs "$@" + _set_nodelists _set_backend_defaults - _sync_num_nodes_from_section_args _patch_dynamo_args + _patch_section_args + _apply_connector_settings _compute_worker_allocation _dump_args } +function replace_placeholders() { + local val="$1" + val=${val//%MODEL%/${dynamo_args["model"]}} + val=${val//%PORT%/${dynamo_args["port"]}} + val=${val//%URL%/${dynamo_args["url"]}} + val=${val//%ENDPOINT%/${dynamo_args["endpoint"]}} + val=${val//%RESULTS_DIR%/${RESULTS_DIR}} + val=${val//%INSTALL_DIR%/${INSTALL_DIR}} + val=${val//%HUGGINGFACE_HOME%/${HUGGINGFACE_HOME}} + echo "$val" +} + function array_to_args() { local -n arr=$1 local result="" for key in "${!arr[@]}"; do - if [[ "$key" == "--extra-args" ]] || \ - [[ "$key" == "--num-nodes" ]] || \ - [[ "$key" == "--nodes" ]]; then - continue + shopt -s nocasematch + val=$(replace_placeholders "${arr[$key]}") + # Quote 
values that contain spaces + if [[ "$val" == *" "* ]]; then + val="${val//\"/\\\"}" # Escape existing quotes + result+="${key} \"${val}\" " else - result+="${key} ${arr[$key]} " + result+="${key} ${val} " fi done echo "$result" @@ -376,37 +436,55 @@ _detect_fatal_once() { _is_vllm || return 0 local n=0 # Worker logs and UCX logs - n=$(( n + $(grep -E "${DYNAMO_WORKER_ERROR_PATTERN}" "${RESULTS_DIR}"/dynamo_*.log 2>/dev/null | wc -l || true) )) + n=$(( n + $(grep -E "${dynamo_args["worker-error-pattern"]}" "${RESULTS_DIR}"/dynamo_*.log 2>/dev/null | wc -l || true) )) n=$(( n + $(grep -E "UCX.*ERROR" "${RESULTS_DIR}"/ucx_log_*.log 2>/dev/null | wc -l || true) )) echo "${n}" } +function perform_exit() +{ + local exit_code=$1 + local sleep_before_exit="${dynamo_args["sleep-before-exit"]}" + if [[ -n "${sleep_before_exit}" ]]; then + log "Sleeping for ${sleep_before_exit} seconds before exit" + sleep "${sleep_before_exit}" + fi + exit "${exit_code}" +} + exit_on_error() { local fatal=$(_detect_fatal_once) + if [ -f "${DONE_MARKER}" ]; then + log "DONE_MARKER found. Skipping error check." + return + fi if [[ "${fatal}" -gt 0 ]]; then log "FATAL: detected ${fatal} fatal error line(s). Writing ${FATAL_ERROR_MARKER} and terminating." + sleep 1 + touch "${FATAL_ERROR_MARKER}" + grep -E "${dynamo_args["worker-error-pattern"]}|UCX.*ERROR" "${RESULTS_DIR}"/*.log 2>/dev/null > "${FATAL_ERROR_MARKER}" # Try to stop background jobs for a cleaner exit, but do not loop kill $(jobs -p) 2>/dev/null || true # Exit non-zero so srun can retry - exit 1 + perform_exit 1 fi } _total_workers_prefill() { - echo $(( dynamo_args["num-prefill-nodes"] * dynamo_args["prefill-workers-per-node"] )) + echo $(( prefill_config["num-nodes"] * prefill_config["workers-per-node"] )) } _total_workers_decode() { - echo $(( dynamo_args["num-decode-nodes"] * dynamo_args["decode-workers-per-node"] )) + echo $(( decode_config["num-nodes"] * decode_config["workers-per-node"] )) } _count_initialized_prefill() { - grep -i -l -E "${dynamo_args["prefill-initialized-regex"]}" "${RESULTS_DIR}"/dynamo_*prefill* 2>/dev/null | wc -l + grep -i -l -E "${prefill_config["worker-initialized-regex"]}" "${RESULTS_DIR}"/dynamo_*prefill* 2>/dev/null | wc -l } _count_initialized_decode() { - grep -i -l -E "${dynamo_args["decode-initialized-regex"]}" "${RESULTS_DIR}"/dynamo_*decode* 2>/dev/null | wc -l + grep -i -l -E "${decode_config["worker-initialized-regex"]}" "${RESULTS_DIR}"/dynamo_*decode* 2>/dev/null | wc -l } _expected_ready_prefill() { @@ -457,12 +535,24 @@ _is_frontend_node() { _is_decode_node() { local name="$(_current_node_name)" - [[ "${dynamo_args["decode-nodes"]}" == *"$name"* ]] + [[ "${decode_config["node-list"]}" == *"$name"* ]] } _is_prefill_node() { local name="$(_current_node_name)" - [[ "${dynamo_args["prefill-nodes"]}" == *"$name"* ]] + [[ "${prefill_config["node-list"]}" == *"$name"* ]] +} + +_is_genai_perf_workload() { + [[ "${dynamo_args["workloads"]}" == *"genai_perf.sh"* ]] +} + +_is_lmbench_workload() { + [[ "${dynamo_args["workloads"]}" == *"lmbench.sh"* ]] +} + +_is_custom_workload_workload() { + [[ "${dynamo_args["workloads"]}" == *"custom_workload.sh"* ]] } _init_runtime_env() { @@ -479,11 +569,14 @@ _init_runtime_env() { function launch_node_setup_cmd() { + logfile="${RESULTS_DIR}/node_setup_$(_current_node_name).log" if [[ -n "${dynamo_args["node-setup-cmd"]}" ]]; then log "Launching node setup command: ${dynamo_args["node-setup-cmd"]}" - bash -c "${dynamo_args["node-setup-cmd"]}" + bash -c 
"${dynamo_args["node-setup-cmd"]}" 2>&1 >> "$logfile" log "Node setup complete" fi + + log "Node environment:\n$(env)" 2>&1 >> "$logfile" } _require_cmd() { @@ -543,6 +636,8 @@ validate_environment() { _require_cmd grep _require_cmd cut _require_cmd curl + _require_cmd jq + _require_cmd uv # Runtime commands invoked later _require_cmd python3 @@ -555,14 +650,6 @@ validate_environment() { exit 1 fi - # If both nodelists are empty, DYNAMO_NODELIST must be provided - if [[ -z "${dynamo_args["decode-nodes"]}" && -z "${dynamo_args["prefill-nodes"]}" ]]; then - if [[ -z "${DYNAMO_NODELIST:-}" ]]; then - log "ERROR: When neither --dynamo-decode-nodes nor --dynamo-prefill-nodes is provided, DYNAMO_NODELIST must be set" - exit 1 - fi - fi - # Directories _ensure_dir_writable "$RESULTS_DIR" if _is_vllm; then @@ -603,6 +690,22 @@ validate_environment() { log "Environment validation complete" } +function wait_for_frontend_marker() +{ + while [ ! -f "$DONE_MARKER" ]; do + exit_on_error + log "Waiting for frontend completion marker by polling $DONE_MARKER" + sleep 30 + done + + log "Done marker found." +} + +function mark_done() +{ + touch "$DONE_MARKER" +} + function launch_etcd() { log "Launching etcd with cmd: ${dynamo_args["etcd-cmd"]} --listen-client-urls http://0.0.0.0:${dynamo_args["etcd-port"]} --advertise-client-urls http://0.0.0.0:${dynamo_args["etcd-port"]}" @@ -640,14 +743,14 @@ function launch_decode() { wait_for_etcd - local workers_per_node=${dynamo_args["decode-workers-per-node"]} - local tp_size=${decode_args["--${dynamo_args["tp-arg-name"]}"]} + local workers_per_node=${decode_config["workers-per-node"]} + local tp_size=${decode_args["--tensor-parallel-size"]} local base_nixl_port=${VLLM_NIXL_SIDE_CHANNEL_PORT:-5557} local base_kv_event_port=${DYN_VLLM_KV_EVENT_PORT:-20080} log "Launching $workers_per_node decode worker(s) with unique port ranges" for i in $(seq 0 $(( $workers_per_node - 1 ))); do - local gpu_list=$(_gpu_list_for_worker "${dynamo_args["decode-gpus-per-worker"]}" "$i") + local gpu_list=$(_gpu_list_for_worker "${decode_config["gpus-per-worker"]}" "$i") local log_file=$(_log_file_for_worker "decode" "$i") # Each worker needs unique port ranges to avoid ZMQ conflicts: # - NIXL side channel: base_port + (worker_index * tp_size) for TP ranks @@ -656,11 +759,11 @@ function launch_decode() local kv_event_port=$((base_kv_event_port + i)) log "Launching decode worker $i on GPUs $gpu_list (NIXL port: $nixl_port, KV event port: $kv_event_port)" - log "Decode cmd: ${dynamo_args["decode-cmd"]} $(array_to_args decode_args) ${decode_args["--extra-args"]}" + log "Decode cmd: ${decode_config["cmd"]} $(array_to_args decode_args) ${decode_args["--extra-args"]}" CUDA_VISIBLE_DEVICES=$gpu_list \ VLLM_NIXL_SIDE_CHANNEL_PORT=$nixl_port \ DYN_VLLM_KV_EVENT_PORT=$kv_event_port \ - ${dynamo_args["decode-cmd"]} \ + ${decode_config["cmd"]} \ $(array_to_args decode_args) ${decode_args["--extra-args"]} > $log_file 2>&1 & done } @@ -678,14 +781,14 @@ function launch_prefill() { wait_for_etcd - local workers_per_node=${dynamo_args["prefill-workers-per-node"]} - local tp_size=${prefill_args["--${dynamo_args["tp-arg-name"]}"]} + local workers_per_node=${prefill_config["workers-per-node"]} + local tp_size=${prefill_args["--tensor-parallel-size"]} local base_nixl_port=${VLLM_NIXL_SIDE_CHANNEL_PORT:-5557} local base_kv_event_port=${DYN_VLLM_KV_EVENT_PORT:-20080} log "Launching $workers_per_node prefill worker(s) with unique port ranges" for i in $(seq 0 $(( $workers_per_node - 1 ))); do - local 
gpu_list=$(_gpu_list_for_worker "${dynamo_args["prefill-gpus-per-worker"]}" "$i") + local gpu_list=$(_gpu_list_for_worker "${prefill_config["gpus-per-worker"]}" "$i") local log_file=$(_log_file_for_worker "prefill" "$i") # Each worker needs unique port ranges to avoid ZMQ conflicts: # - NIXL side channel: base_port + (worker_index * tp_size) for TP ranks @@ -694,22 +797,32 @@ function launch_prefill() local kv_event_port=$((base_kv_event_port + i)) log "Launching prefill worker $i on GPUs $gpu_list (NIXL port: $nixl_port, KV event port: $kv_event_port)" - log "Prefill cmd: ${dynamo_args["prefill-cmd"]} $(array_to_args prefill_args) ${prefill_args["--extra-args"]}" + log "Prefill cmd: ${prefill_config["cmd"]} $(array_to_args prefill_args) ${prefill_args["--extra-args"]}" CUDA_VISIBLE_DEVICES=$gpu_list \ VLLM_NIXL_SIDE_CHANNEL_PORT=$nixl_port \ DYN_VLLM_KV_EVENT_PORT=$kv_event_port \ - ${dynamo_args["prefill-cmd"]} \ + ${prefill_config["cmd"]} \ $(array_to_args prefill_args) ${prefill_args["--extra-args"]} > $log_file 2>&1 & done } +function launch_lmcache_controller() +{ + if [[ "${dynamo_args["connector"]}" != "lmcache" ]]; then + return + fi + + log "Launching LMCache controller with cmd: ${lmcache_config["controller_cmd"]}" + ${lmcache_config["controller_cmd"]} > ${RESULTS_DIR}/lmcache_controller.log 2>&1 +} + function wait_for_dynamo_frontend() { - local want_prefill=$(_expected_ready_prefill) + local want_prefill=0 #$(_expected_ready_prefill) local want_decode=$(_expected_ready_decode) while :; do - local have_prefill=$(_count_initialized_prefill) + local have_prefill=0 #$(_count_initialized_prefill) local have_decode=$(_count_initialized_decode) log "Initialized: prefill ${have_prefill}/${want_prefill}; decode ${have_decode}/${want_decode}" @@ -725,44 +838,185 @@ function wait_for_dynamo_frontend() log "Dynamo frontend is ready" } -_probe_frontend_once() { +_query_frontend() { + local content="${1:-The color of sky is}" + content=$(echo "$content" | sed 's/"/\\"/g' | sed 's/\n/\\n/g') + local max_tokens="${2:-10}" + local json='{ "model": "'${dynamo_args["model"]}'", - "messages": [{"role": "user", "content": "The color of sky is"}], + "messages": [{"role": "user", "content": "'"$content"'"}], "stream": false, - "max_tokens": 10 + "max_tokens": '$max_tokens', + "temperature": 0, + "top_p": 0.0001 }' - curl -s -X POST "${dynamo_args["url"]}/v1/chat/completions" -H "Content-Type: application/json" -d "$json" + + echo "$json" > "$RESULTS_DIR/curl_cmd.json" + curl -s -X POST "${dynamo_args["url"]}/v1/chat/completions" -H "Content-Type: application/json" -d @$RESULTS_DIR/curl_cmd.json } -function launch_genai_perf() +function setup_cufile() { - wait_for_dynamo_frontend - - local resp=$(_probe_frontend_once) - echo "Response: $resp" + export CUFILE_ENV_PATH_JSON="$RESULTS_DIR/cufile.json" + cat < $CUFILE_ENV_PATH_JSON +{ + // NOTE : Application can override custom configuration via export CUFILE_ENV_PATH_JSON= + // e.g : export CUFILE_ENV_PATH_JSON="/home//cufile.json" + "properties": { + // allow compat mode, this will enable use of cuFile posix read/writes + "allow_compat_mode": true, + // max IO chunk size (parameter should be multiples of 64K) used by cuFileRead/Write internally per IO request + "max_direct_io_size_kb" : 16384, + // device memory size (parameter should be 4K aligned) for reserving bounce buffers for the entire GPU + "max_device_cache_size_kb" : 2097152, + // Note: ensure (max_device_cache_size_kb / per_buffer_cache_size_kb) >= io_batchsize + // per-io bounce-buffer size 
(parameter should be multiples of 64K) ranging from 1024kb to 16384kb + "per_buffer_cache_size_kb": 16384, + // limit on maximum device memory size (parameter should be 4K aligned) that can be pinned for a given process + "max_device_pinned_mem_size_kb" : 33554432, + + // posix bounce buffer pool size allocations + "posix_pool_slab_size_kb" : [16384], + // posix bounce buffer pool max counts + "posix_pool_slab_count": [1024] + }, + "logging": { + "dir": "$RESULTS_DIR", + "level": "${CUFILE_LOG_LEVEL:-INFO}" + } +} +EOF +} - local genai_perf_arguments=$(array_to_args genai_perf_args) - log "Launching genai-perf with cmd: ${dynamo_args["genai-perf-cmd"]} $genai_perf_arguments ${genai_perf_args["--extra-args"]}" +function setup_storage_cache_dir() +{ + # Use a global variable that can be exported + STORAGE_CACHE_DIR="${dynamo_args["storage-cache-dir"]}/${TEST_USER}/${dynamo_args["frontend-node"]}/${dynamo_args["connector"]}/cache" + rm -rf ${STORAGE_CACHE_DIR} + mkdir -p ${STORAGE_CACHE_DIR} + chmod 755 ${STORAGE_CACHE_DIR} +} - ${dynamo_args["genai-perf-cmd"]} ${genai_perf_arguments} ${genai_perf_args["--extra-args"]} > ${RESULTS_DIR}/genai_perf.log 2>&1 +function setup_kvbm() +{ + if [[ "${dynamo_args["connector"]}" != "kvbm" ]]; then + return + fi - log "Done with genai-perf run" + setup_storage_cache_dir + export DYN_KVBM_DISK_CACHE_DIR=${STORAGE_CACHE_DIR} + setup_cufile } -function wait_for_frontend_marker() +function setup_lmcache() { - while [ ! -f "$DONE_MARKER" ]; do - exit_on_error - log "Waiting for frontend completion marker by polling $DONE_MARKER" - sleep 30 + if [[ "${dynamo_args["connector"]}" != "lmcache" ]]; then + return + fi + + local lmcache_path="${lmcache_config["repo"]}" + log "Installing LMCache using: uv pip install $lmcache_path" + uv pip install -e $lmcache_path + + setup_storage_cache_dir + + export LMCACHE_CONFIG_FILE=$RESULTS_DIR/lmcache-nixl-config.yaml + rm -f $LMCACHE_CONFIG_FILE + + for key in "${!lmcache_args[@]}"; do + shopt -s nocasematch + if [[ "$key" == "extra_config"* ]]; then + continue + fi + + val="${lmcache_args[$key]}" + echo "$key: $val" >> $LMCACHE_CONFIG_FILE done - log "Done marker found." + echo "extra_config:" >> $LMCACHE_CONFIG_FILE + for key in "${!lmcache_args[@]}"; do + shopt -s nocasematch + if [[ "$key" == "extra_config"* ]]; then + nkey="${key#extra_config_}" + val="${lmcache_args[$key]}" + val=${val//%CACHEDIR%/${STORAGE_CACHE_DIR}} + echo " $nkey: $val" >> $LMCACHE_CONFIG_FILE + fi + done + setup_cufile +} + +function log_gpu_utilization() +{ + # Check if nvidia-smi is available + if ! 
command -v nvidia-smi &> /dev/null; then + log "Error: nvidia-smi not found" + return + fi + + wait_for_dynamo_frontend + log "Starting GPU utilization monitoring" + + nvidia-smi \ + --query-gpu=timestamp,name,pci.bus_id,pstate,pcie.link.gen.max,pcie.link.gen.current,temperature.gpu,utilization.gpu,utilization.memory,memory.total,memory.free,memory.used \ + --format=csv \ + -l 5 \ + -f ${RESULTS_DIR}/gpu_utilization-${SLURM_NODEID}.csv +} + +function launch_workload() +{ + local workload_config_name="$1" + local workload_args_name="$2" + + # Create nameref to the associative arrays + local -n workload_config_ref="$workload_config_name" + local -n workload_args_ref="$workload_args_name" + + local workload_name="${workload_config_ref["--name"]}" + local script="${workload_config_ref["--script"]}" + local expanded_config=$(array_to_args "$workload_config_name") + local expanded_arguments=$(array_to_args "$workload_args_name") + + log "Launching $workload_name with cmd: ${INSTALL_DIR}/$script $expanded_config -- $expanded_arguments" + + # Use eval to properly handle values with spaces in expanded_config + eval bash ${INSTALL_DIR}/$script \ + --install_dir "$INSTALL_DIR" \ + --result_dir "$RESULTS_DIR" \ + --model '"${dynamo_args["model"]}"' \ + --url '"http://${dynamo_args["frontend-node"]}"' \ + --port '"${dynamo_args["port"]}"' \ + --endpoint '"${dynamo_args["endpoint"]}"' \ + --gpus_per_node '"$(_gpus_per_node)"' \ + $expanded_config \ + -- $expanded_arguments '>' ${RESULTS_DIR}/$workload_name.log '2>&1' + + log "Done with $workload_name run" +} + +function launch_workloads() +{ + wait_for_dynamo_frontend + + if _is_genai_perf_workload; then + launch_workload genai_perf_config genai_perf_args + fi + if _is_lmbench_workload; then + launch_workload lmbench_config lmbench_args + fi + if _is_custom_workload_workload; then + launch_workload custom_workload_config custom_workload_args + fi + + mark_done } function main() { + parse_args "$@" + _init_runtime_env launch_node_setup_cmd @@ -773,9 +1027,15 @@ function main() cd ${dynamo_args["workspace-path"]} fi + cd $RESULTS_DIR + + log_gpu_utilization & + if _is_frontend_node; then log "Node ID: $SLURM_NODEID, Role: frontend" log_node_role "$(_current_node_name)" "frontend" + setup_lmcache + setup_kvbm launch_etcd & launch_nats & wait_for_etcd @@ -798,17 +1058,18 @@ function main() fi if _is_frontend_node; then - launch_genai_perf - touch "$DONE_MARKER" + launch_lmcache_controller & + + sleep 10 + + launch_workloads & fi wait_for_frontend_marker } -parse_args "$@" - -log "env: $(env)" - log "Starting main" -main +main "$@" log "Done with main" + +perform_exit 0 diff --git a/src/cloudai/workloads/ai_dynamo/calc_percentile_csv.py b/src/cloudai/workloads/ai_dynamo/calc_percentile_csv.py new file mode 100644 index 000000000..465b6983c --- /dev/null +++ b/src/cloudai/workloads/ai_dynamo/calc_percentile_csv.py @@ -0,0 +1,139 @@ +# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +# Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import csv +import math +import os +from typing import Any, Dict, List + + +def compute_percentile(sorted_values: List[float], percentile: float) -> float: + if not sorted_values: + return float("nan") + if percentile <= 0: + return float(sorted_values[0]) + if percentile >= 100: + return float(sorted_values[-1]) + # Nearest-rank linear interpolation (common in data tools) + k = (len(sorted_values) - 1) * (percentile / 100.0) + f = math.floor(k) + c = math.ceil(k) + if f == c: + return float(sorted_values[int(k)]) + d0 = sorted_values[f] * (c - k) + d1 = sorted_values[c] * (k - f) + return float(d0 + d1) + + +def summarize(values: List[float]) -> Dict[str, float]: + if not values: + return { + "avg": float("nan"), + "min": float("nan"), + "max": float("nan"), + "p99": float("nan"), + "p95": float("nan"), + "p90": float("nan"), + "p75": float("nan"), + "p50": float("nan"), + "p25": float("nan"), + "p10": float("nan"), + "p5": float("nan"), + "p1": float("nan"), + } + sorted_vals = sorted(values) + avg_val = sum(sorted_vals) / len(sorted_vals) + return { + "avg": round(avg_val, 2), + "min": round(sorted_vals[0], 2), + "max": round(sorted_vals[-1], 2), + "p99": round(compute_percentile(sorted_vals, 99), 2), + "p95": round(compute_percentile(sorted_vals, 95), 2), + "p90": round(compute_percentile(sorted_vals, 90), 2), + "p75": round(compute_percentile(sorted_vals, 75), 2), + "p50": round(compute_percentile(sorted_vals, 50), 2), + "p25": round(compute_percentile(sorted_vals, 25), 2), + "p10": round(compute_percentile(sorted_vals, 10), 2), + "p5": round(compute_percentile(sorted_vals, 5), 2), + "p1": round(compute_percentile(sorted_vals, 1), 2), + } + + +def parse_float_safe(value: Any) -> float: + try: + return float(value) + except Exception: + return float("nan") + + +def main() -> None: + parser = argparse.ArgumentParser(description="Summarize LMCACHE bench CSV metrics") + parser.add_argument("--input", "-i", help="Path to input CSV (e.g., lmcache_bench_output_0.1.csv)") + parser.add_argument("--output", "-o", help="Path to write summary CSV. 
Defaults to _summary.csv") + args = parser.parse_args() + + input_path = args.input + output_path = args.output or f"{input_path}_summary.csv" + + rows: List[Dict[str, Any]] = [] + with open(input_path, newline="") as f: + reader = csv.DictReader(f) + for r in reader: + rows.append(r) + + # Build summaries + summaries: List[Dict[str, Any]] = [] + + def append_summary(metric_name: str, values: List[float]) -> None: + clean_values = [v for v in values if v is not None and not math.isnan(v)] + stats = summarize(clean_values) + summaries.append({"Metric": metric_name, **stats}) + + # Summarize all numeric columns present in the CSV + all_columns: List[str] = list(rows[0].keys()) if rows else [] + for col in all_columns: + col_values = [parse_float_safe(r.get(col)) for r in rows] + append_summary(col, col_values) + + fieldnames = [ + "Metric", + "avg", + "min", + "max", + "p99", + "p95", + "p90", + "p75", + "p50", + "p25", + "p10", + "p5", + "p1", + ] + + os.makedirs(os.path.dirname(output_path) or ".", exist_ok=True) + with open(output_path, "w", newline="") as f: + writer = csv.DictWriter(f, fieldnames=fieldnames) + writer.writeheader() + for row in summaries: + writer.writerow(row) + + print(f"Wrote summary to: {output_path}") + + +if __name__ == "__main__": + main() diff --git a/src/cloudai/workloads/ai_dynamo/custom_workload.sh b/src/cloudai/workloads/ai_dynamo/custom_workload.sh new file mode 100644 index 000000000..503682332 --- /dev/null +++ b/src/cloudai/workloads/ai_dynamo/custom_workload.sh @@ -0,0 +1,172 @@ +#! /bin/bash + +# Called as: + # bash ./custom_workload.sh --result_dir --report_file --calc_percentile_csv_script --gpus_per_node -- + +# extract result_dir, report_file, and calc_percentile_csv_script from the command line arguments +result_dir="" +report_name="custom_workload_report.csv" +model="" +url="" +port="" +endpoint="" +all_isl="" +declare -A workload_args + + +# Simple log function +log() { + echo "[$(date +%F\ %T) $(hostname)]: $*" +} + +function parse_custom_workload_args() +{ + local args="$@" + while [[ $# -gt 0 ]]; do + case "$1" in + --*) + workload_args["${1}"]="$2" + shift 2 + ;; + *) + shift + ;; + esac + done +} + +function process_args() +{ + while [[ $# -gt 0 ]]; do + case "$1" in + --model) + model="$2" + shift 2 + ;; + --url) + url="$2" + shift 2 + ;; + --port) + port="$2" + shift 2 + ;; + --endpoint) + endpoint="$2" + shift 2 + ;; + --result_dir) + result_dir="$2" + shift 2 + ;; + --install_dir) + install_dir="$2" + shift 2 + ;; + --report_name) + report_name="$2" + shift 2 + ;; + --isl) + all_isl="$2" + shift 2 + ;; + --) + shift + parse_custom_workload_args "$@" + break + ;; + --*) + shift 2 + ;; + *) + shift + ;; + esac + done + + log """Parsed args: + model: $model + url: $url + port: $port + endpoint: $endpoint + result_dir: $result_dir + install_dir: $install_dir + report_name: $report_name + isl: $all_isl + workload_args: $(for key in "${!workload_args[@]}"; do echo -n "$key: ${workload_args[$key]} "; done) + """ +} + +#function clear_lmcache() +#{ +# log "Clearing LMCache" +# +# response=$(curl -X POST http://${lmcache_config["controller_url"]}/clear \ +# -H "Content-Type: application/json" \ +# -d '{ +# "instance_id": "lmcache_default_instance", +# "location": "LocalCPUBackend" +# }') +# +# log "LMCache cleared. 
Response: $response" +#} + +function clear_kv_cache() +{ + # Use default ports if env vars not set + local metrics_port="${DYN_METRICS_PORT:-${port}}" # Default to the main API port + local kvbm_metrics_port="${DYN_KVBM_METRICS_PORT:-6880}" # Default KVBM metrics port + + local dyn_metrics_endpoint="${url}:${metrics_port}" + local kvbm_metrics_endpoint="${url}:${kvbm_metrics_port}/metrics" + + # This clears G1 (GPU) + G2 (CPU) + G3 (Disk) at once + status=$(curl -s ${dyn_metrics_endpoint}/metrics 2>/dev/null | grep -E "kvstats_active_blocks|kvstats_total_blocks" || echo "metrics unavailable") + log "KV cache status before clear: $status" + + response=$(curl -s -X POST ${dyn_metrics_endpoint}/reset_prefix_cache 2>/dev/null || echo "endpoint unavailable") + log "KV prefix cache reset. Response: $response" + + response=$(curl -s -X POST ${dyn_metrics_endpoint}/clear_kv_blocks 2>/dev/null || echo "endpoint unavailable") + log "KV blocks cleared. Response: $response" + + status=$(curl -s ${dyn_metrics_endpoint}/metrics 2>/dev/null | grep -E "kvstats_active_blocks|kvstats_total_blocks" || echo "metrics unavailable") + log "KV cache status after clear: $status" + + status=$(curl -s ${kvbm_metrics_endpoint} | grep -E "host_cache_hit_rate|disk_cache_hit_rate") + log "KVBM cache hit rates after clear: $status" + + # if [[ "${dynamo_args["connector"]}" == "lmcache" ]]; then + # clear_lmcache + # fi +} + +function main() +{ + process_args "$@" + + report_file=$result_dir/$report_name + + log "Launching custom workload with ISLs: $all_isl" + + python3 ${install_dir}/openai_chat_client.py --dump_csv_header --out $report_file + + for isl in $(echo $all_isl | tr ',' '\n'); do + log "Launching custom workload with ISL: $isl" + python3 ${install_dir}/openai_chat_client.py \ + --model $model \ + --url $url:$port/v1 \ + --isl $isl \ + --osl 10 \ + --out $report_file \ + --num_filler_prompts 100 \ + --filler_len_chars 10000 + + clear_kv_cache + done + + log "Done with custom workload run" +} + +main "$@" diff --git a/src/cloudai/workloads/ai_dynamo/genai_perf.sh b/src/cloudai/workloads/ai_dynamo/genai_perf.sh new file mode 100644 index 000000000..26d9cbcf0 --- /dev/null +++ b/src/cloudai/workloads/ai_dynamo/genai_perf.sh @@ -0,0 +1,106 @@ +#! 
/bin/bash + +# Called as: + # ./genai_perf_wrapper.sh --result_dir --report_file --calc_percentile_csv_script --gpus_per_node -- + +# Simple log function +log() { + echo "[$(date +%F\ %T) $(hostname)]: $*" +} + +# extract result_dir, report_file, and calc_percentile_csv_script from the command line arguments +result_dir="" +report_name="genai_perf_report.csv" +calc_percentile_csv_script="" +gpus_per_node=1 +port="" +repo="" +cmd="" +cmdline_args=() +extra_args="" + +while [[ $# -gt 0 ]]; do + case "$1" in + --model) + model="$2" + shift 2 + ;; + --url) + url="$2" + shift 2 + ;; + --port) + port="$2" + shift 2 + ;; + --endpoint) + endpoint="$2" + shift 2 + ;; + --result_dir) + result_dir="$2" + shift 2 + ;; + --install_dir) + install_dir="$2" + shift 2 + ;; + --gpus_per_node) + gpus_per_node="$2" + shift 2 + ;; + --report_name) + report_name="$2" + shift 2 + ;; + --cmd) + cmd="$2" + shift 2 + ;; + --extra-args|--extra_args) + extra_args="$2" + shift 2 + ;; + --) + shift + cmdline_args=("$@") + break + ;; + --*) + shift 2 + ;; + *) + shift + ;; + esac +done + +# Combine cmdline_args and extra_args +cmdline_args="${cmdline_args[*]} $extra_args" + +# Build the full command with model and url +full_cmd="$cmd $cmdline_args" + +# launch genai-perf +log "Launching genai-perf with args: $full_cmd" + +eval $full_cmd + +log "Done with genai-perf run" + +# Calculate total GPUs - use SLURM_JOB_NUM_NODES if available, otherwise default to 1 node +num_nodes=${SLURM_JOB_NUM_NODES:-1} +total_gpus=$(( $gpus_per_node * $num_nodes )) + +profile_path=$(find $result_dir -type f -name "profile_genai_perf.csv" -print -quit) +sed -ie 's/\r//g' $profile_path +if [[ -f "$profile_path" ]]; then + output_tokens_per_second=$(grep "Output Token Throughput (tokens/sec)" $profile_path | cut -d ',' -f 2) + output_tokens_per_second_per_gpu=$(awk "BEGIN {printf \"%.2f\", $output_tokens_per_second / $total_gpus}") + request_throughput=$(grep "Request Throughput (per sec)" $profile_path | cut -d ',' -f 2) + request_count=$(grep "Request Count (count)" $profile_path | cut -d ',' -f 2) + grep ".*,.*,.*,.*" $profile_path > $result_dir/$report_name + echo "Output tokens per second per gpu,$output_tokens_per_second_per_gpu,0,0,0,0,0,0,0,0,0,0,0" >> $result_dir/$report_name + echo "Request throughput per second,$request_throughput,0,0,0,0,0,0,0,0,0,0,0" >> $result_dir/$report_name + echo "Request count,$request_count,0,0,0,0,0,0,0,0,0,0,0" >> $result_dir/$report_name +fi diff --git a/src/cloudai/workloads/ai_dynamo/kubernetes_json_gen_strategy.py b/src/cloudai/workloads/ai_dynamo/kubernetes_json_gen_strategy.py index 4b661d546..f7ce95fd3 100644 --- a/src/cloudai/workloads/ai_dynamo/kubernetes_json_gen_strategy.py +++ b/src/cloudai/workloads/ai_dynamo/kubernetes_json_gen_strategy.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -# Copyright (c) 2024-2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2024-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,7 +21,7 @@ from cloudai.core import JsonGenStrategy from cloudai.systems.kubernetes import KubernetesSystem -from .ai_dynamo import AIDynamoTestDefinition, WorkerBaseArgs +from .ai_dynamo import AIDynamoTestDefinition, WorkerBaseArgs, WorkerConfig class AIDynamoKubernetesJsonGenStrategy(JsonGenStrategy): @@ -126,14 +126,14 @@ def _to_dynamo_arg(self, arg_name: str) -> str: def _dynamo_args_dict(self, model: WorkerBaseArgs) -> dict: return model.model_dump(exclude={"num_nodes", "extra_args", "nodes"}, exclude_none=True) - def _args_from_worker_config(self, worker: WorkerBaseArgs) -> list[str]: + def _args_from_worker_config(self, worker: WorkerConfig) -> list[str]: args = [] - for arg, value in self._dynamo_args_dict(worker).items(): + for arg, value in self._dynamo_args_dict(worker.args).items(): args.extend([self._to_dynamo_arg(arg), str(value)]) if worker.extra_args: args.append(f"{worker.extra_args}") return args - def _set_multinode_if_needed(self, cfg: dict[str, Any], worker: WorkerBaseArgs) -> None: + def _set_multinode_if_needed(self, cfg: dict[str, Any], worker: WorkerConfig) -> None: if cast(int, worker.num_nodes) > 1: cfg["multinode"] = {"nodeCount": worker.num_nodes} diff --git a/src/cloudai/workloads/ai_dynamo/lmbench.sh b/src/cloudai/workloads/ai_dynamo/lmbench.sh new file mode 100644 index 000000000..75eaffc8a --- /dev/null +++ b/src/cloudai/workloads/ai_dynamo/lmbench.sh @@ -0,0 +1,119 @@ +#! /bin/bash + +# Called as: + # bash ./lmbench.sh --result_dir --report_file --calc_percentile_csv_script --gpus_per_node -- + +# Simple log function +log() { + echo "[$(date +%F\ %T) $(hostname)]: $*" +} + +# extract result_dir, report_file, and calc_percentile_csv_script from the command line arguments +result_dir="" +report_name="lmbench_report.csv" +calc_percentile_csv_script="calc_percentile_csv.py" +gpus_per_node=1 +lmbench_dir="/git/LMBenchmark" +install_dir="" +port="" +cmd="" +all_qps="" +cmdline_args=() + +while [[ $# -gt 0 ]]; do + case "$1" in + --model) + model="$2" + shift 2 + ;; + --url) + url="$2" + shift 2 + ;; + --port) + port="$2" + shift 2 + ;; + --install_dir) + install_dir="$2" + shift 2 + ;; + --endpoint) + endpoint="$2" + shift 2 + ;; + --result_dir) + result_dir="$2" + shift 2 + ;; + --report_name) + report_name="$2" + shift 2 + ;; + --extra_args) + extra_args="$2" + shift 2 + ;; + --repo) + lmbench_dir="$2" + shift 2 + ;; + --gpus_per_node) + gpus_per_node="$2" + shift 2 + ;; + --cmd) + cmd="$2" + shift 2 + ;; + --qps) + all_qps="$2" + shift 2 + ;; + --) + shift + cmdline_args="$@" + break + ;; + --*) + shift 2 + ;; + *) + shift + ;; + esac +done + +pushd $lmbench_dir + +cmdline_args="${cmdline_args} --output ${result_dir}/lmbench_bench_output.csv ${key} ${extra_args}" +# launch lmbench + + +# run LMBenchmark, adjust the model name if you are using a different model +# for detail how to config and run LMBenchmark: https://github.com/LMCache/LMBenchmark/tree/main/synthetic-multi-round-qa + +#export NUM_USERS_WARMUP="20" +#export NUM_USERS="15" +#export NUM_ROUNDS="20" +#export SYSTEM_PROMPT="1000" # Shared system prompt length +#export CHAT_HISTORY="7000" # User specific chat history length +#export ANSWER_LEN="100" # Generation length per round +#export INIT_USER_ID="1" +#export TEST_DURATION="600" # Duration of the test in seconds + +log "Launching lmbench with args: $cmd $cmdline_args" + +artifacts_dir="${result_dir}/lmbench_artifacts" +mkdir -p 
$artifacts_dir + +for qps in ${all_qps//,/ }; do + log "Launching lmbench with args: $cmd $cmdline_args --qps $qps --output $output_file" + output_file="${artifacts_dir}/lmbench_bench_output_${qps}.csv" + report_file="${result_dir}/${report_name//.csv/_${qps}.csv}" + eval "$cmd $cmdline_args --qps $qps --output $output_file" 2>&1 > ${artifacts_dir}/lmbench_bench_output_${qps}.log + python3 ${install_dir}/${calc_percentile_csv_script} --input $output_file --output ${report_file} +done + +log "Done with lmbench run" +popd diff --git a/src/cloudai/workloads/ai_dynamo/openai_chat_client.py b/src/cloudai/workloads/ai_dynamo/openai_chat_client.py new file mode 100644 index 000000000..50a350667 --- /dev/null +++ b/src/cloudai/workloads/ai_dynamo/openai_chat_client.py @@ -0,0 +1,220 @@ +# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +# Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Single-shot chat completion client for TTFT benchmark.""" + +# Future +from __future__ import annotations + +# Standard +import argparse +import json +import random +import string +import sys +import time +from pathlib import Path + +# Third Party +from openai import OpenAI # type: ignore[import-untyped] +from transformers import AutoTokenizer # type: ignore[import-untyped] + +# ---------------------------------------------------------------------- +FILLER_LEN_CHARS = 10_000 # ≈ length of each cache-filler prompt +NUM_FILLER_PROMPTS = 100 # how many fillers to send for eviction +# ---------------------------------------------------------------------- + + +# ---------------- helper utilities ------------------------------------ + + +def log_jsonl(path: Path, rec: dict) -> None: + with path.open("a", encoding="utf-8") as fh: + json.dump(rec, fh) + fh.write("\n") + + +class TtftStats: + """Holds TTFT benchmark results including timing and token counts.""" + + def __init__(self, ttft_seconds: float, prompt_tokens: int, cached_tokens: int): + self.ttft_seconds = ttft_seconds + self.prompt_tokens = prompt_tokens + self.cached_tokens = cached_tokens + + +class Chat: + """Represents a chat context with a document for TTFT benchmarking.""" + + def __init__(self, isl: int, model_id: str, max_ctx_tokens: int): + self.isl = isl + self.model_id = model_id + self.max_ctx_tokens = max_ctx_tokens + self.tok = AutoTokenizer.from_pretrained(model_id, use_fast=True) + + raw_doc = "".join(random.choices(string.ascii_letters + string.digits, k=self.isl * 4)) + + ids = self.tok.encode(raw_doc, add_special_tokens=False, truncation=True, max_length=self.isl) + assert len(ids) == self.isl, f"Expected {self.isl} tokens, got {len(ids)}" + doc = self.tok.decode(ids, skip_special_tokens=True) + + self.messages = [ + {"role": "user", "content": f"I've got a document:\n```\n{doc}\n```"}, + {"role": "assistant", "content": "I've got your document."}, + {"role": "user", "content": "summarize"}, + ] + + def stream(self, 
client: OpenAI, max_tokens: int) -> TtftStats: + stats = TtftStats(0, 0, 0) + + start = time.perf_counter() + try: + stream = client.chat.completions.create( + model=self.model_id, + messages=self.messages, + temperature=0.0, + stream=True, + stream_options={"include_usage": True}, + max_tokens=max_tokens, + ) + + first_tok_t: float | None = None + for chunk in stream: + # Capture prompt_tokens from usage if available + if chunk.usage and chunk.usage.prompt_tokens: + stats.prompt_tokens = chunk.usage.prompt_tokens + # Capture cached_tokens from prompt_tokens_details if available + usage_details = chunk.usage and chunk.usage.prompt_tokens_details + if usage_details and usage_details.cached_tokens is not None: + stats.cached_tokens = usage_details.cached_tokens + if first_tok_t is None and chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content: + first_tok_t = time.perf_counter() + + if first_tok_t is None: + raise RuntimeError("no tokens returned") + + stats.ttft_seconds = round(first_tok_t - start, 3) + return stats + except json.JSONDecodeError as e: + print(f"Error: JSON decode error during streaming: {e}") + print("This may indicate empty SSE events from the server - likely a server-side bug") + # Return partial stats with error indication + stats.ttft_seconds = -1 # Indicate error + return stats + except Exception as e: + print(f"Error during streaming: {type(e).__name__}: {e}") + stats.ttft_seconds = -1 # Indicate error + return stats + + +def flush_kv_cache(args: argparse.Namespace, client: OpenAI) -> None: + """Flush KV cache by sending filler prompts.""" + for _ in range(args.num_filler_prompts): + _ = Chat(args.filler_len_chars, args.model, args.max_ctx_tokens).stream(client, 1) + + +# ---------------- command-line parsing -------------------------------- +def parse_args() -> argparse.Namespace: + ap = argparse.ArgumentParser( + prog=Path(sys.argv[0]).name, + description="Single-shot chat completion client for TTFT benchmark.", + ) + ap.add_argument("--dump_csv_header", action="store_true", help="Only dump CSV header and exit.") + ap.add_argument("--url", help="URL of the API endpoint.") + ap.add_argument("--model", help="Model name/ID.") + ap.add_argument("--max_ctx_tokens", type=int, default=131_072, help="Max context tokens.") + ap.add_argument("--isl", type=int, help="Input tokens.") + ap.add_argument("--osl", type=int, help="Output tokens.") + ap.add_argument("--out", help="JSONL file for results.") + ap.add_argument( + "--num_filler_prompts", + type=int, + default=NUM_FILLER_PROMPTS, + help="Number of filler prompts to send for cache flush.", + ) + ap.add_argument( + "--filler_len_chars", + type=int, + default=FILLER_LEN_CHARS, + help="Length of filler prompt in characters.", + ) + return ap.parse_args() + + +# ---------------- main routine ---------------------------------------- +def main() -> None: + args = parse_args() + + result= { + "isl": args.isl, + "context_tokens": 0, + "baseline_cached_tokens": 0, + "baseline_ttft_seconds": 0, + "no_flush_cached_tokens": 0, + "no_flush_ttft_seconds": 0, + "post_flush_cached_tokens": 0, + "post_flush_ttft_seconds": 0, + } + + if args.dump_csv_header: + with Path(args.out).open("a", encoding="utf-8") as f: + f.write(",".join(result.keys())) + f.write("\n") + return + + chat = Chat(args.isl, args.model, args.max_ctx_tokens) + + client = OpenAI(base_url=args.url, api_key="dummy-key-for-local-server") + + # ---------------- RUN 1 ---------------- + print("\n=== Run 1: baseline TTFT ===") + baseline = 
chat.stream(client, args.osl) + print(f"Run 1: TTFT = {baseline.ttft_seconds:.3f}s with {baseline.cached_tokens} cached tokens") + + # Run 2 with same doc without cache flush + print("\n=== Run 2: TTFT without cache flush ===") + no_flush = chat.stream(client, args.osl) + print(f"Run 2: TTFT = {no_flush.ttft_seconds:.3f}s with {no_flush.cached_tokens} cached tokens") + + # Flush cache + print(f"\nFlushing KV-cache with {NUM_FILLER_PROMPTS} prompts …") + flush_kv_cache(args, client) + + # Run 3 with same doc with cache flush + print("\n=== Run 3: TTFT with cache flush ===") + post_flush = chat.stream(client, args.osl) + print(f"Run 3: TTFT = {post_flush.ttft_seconds:.3f}s with {post_flush.cached_tokens} cached tokens") + + result["context_tokens"] = baseline.prompt_tokens + result["baseline_cached_tokens"] = baseline.cached_tokens + result["baseline_ttft_seconds"] = baseline.ttft_seconds + result["no_flush_cached_tokens"] = no_flush.cached_tokens + result["no_flush_ttft_seconds"] = no_flush.ttft_seconds + result["post_flush_cached_tokens"] = post_flush.cached_tokens + result["post_flush_ttft_seconds"] = post_flush.ttft_seconds + + out_path = Path(args.out) + with out_path.open("a", encoding="utf-8") as f: + if out_path.suffix == ".csv": + line=",".join(str(v) for v in result.values()) + f.write(line + "\n") + else: + json.dump(result, f, indent=2) + f.write("\n") + + +if __name__ == "__main__": + main() diff --git a/src/cloudai/workloads/ai_dynamo/report_generation_strategy.py b/src/cloudai/workloads/ai_dynamo/report_generation_strategy.py index d42582132..b28995c2a 100644 --- a/src/cloudai/workloads/ai_dynamo/report_generation_strategy.py +++ b/src/cloudai/workloads/ai_dynamo/report_generation_strategy.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -# Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -16,18 +16,16 @@ from __future__ import annotations -import csv import logging -import shutil from pathlib import Path -from typing import TYPE_CHECKING, ClassVar, cast +from typing import TYPE_CHECKING + +import pandas as pd from cloudai.core import METRIC_ERROR, ReportGenerationStrategy -from cloudai.systems.kubernetes.kubernetes_system import KubernetesSystem -from cloudai.systems.slurm.slurm_system import SlurmSystem if TYPE_CHECKING: - from .ai_dynamo import AIDynamoTestDefinition + pass CSV_FILES_PATTERN = "profile*_genai_perf.csv" JSON_FILES_PATTERN = "profile*_genai_perf.json" @@ -36,163 +34,37 @@ class AIDynamoReportGenerationStrategy(ReportGenerationStrategy): """Strategy for generating reports from AI Dynamo run directories.""" - metrics: ClassVar[list[str]] = [ - "default", - "output-token-throughput", - "request-throughput", - "time-to-first-token", - "time-to-second-token", - "request-latency", - "inter-token-latency", - ] - - def can_handle_directory(self) -> bool: - output_path = self.test_run.output_path - csv_files = list(output_path.rglob(CSV_FILES_PATTERN)) - json_files = list(output_path.rglob(JSON_FILES_PATTERN)) - logging.debug(f"Found CSV files: {csv_files}, JSON files: {json_files}") - return len(csv_files) > 0 and len(json_files) > 0 - - def _find_csv_file(self) -> Path | None: - output_path = self.test_run.output_path - if not output_path.exists() or not output_path.is_dir(): - return None - - csv_files = list(output_path.rglob(CSV_FILES_PATTERN)) - if not csv_files or csv_files[0].stat().st_size == 0: - return None - - return csv_files[0] - - def _extract_metric_value(self, header: list[str], row: list[str], metric_idx: int) -> float | None: - if "Value" in header: - value_idx = header.index("Value") - return float(row[value_idx].replace(",", "")) - elif "avg" in header: - avg_idx = header.index("avg") - return float(row[avg_idx].replace(",", "")) - return None - - def _find_metric_in_section(self, section: list[list[str]], metric_name: str) -> float | None: - if not section: - return None - - header = section[0] - if "Metric" not in header: - return None - - metric_idx = header.index("Metric") - for row in section[1:]: - if row[metric_idx] == metric_name: - return self._extract_metric_value(header, row, metric_idx) - return None - - def _read_metric_from_csv(self, metric_name: str) -> float: - source_csv = self._find_csv_file() - if not source_csv: + def extract_metric_from_csv(self, csv_file: Path, metric_name: str, metric_type: str) -> float: + df = pd.read_csv(csv_file) + if metric_type not in df.columns: + logging.info(f"Metric type: {metric_type} not in CSV file: {df.columns}") return METRIC_ERROR - sections = self._read_csv_sections(source_csv) - for section in sections: - value = self._find_metric_in_section(section, metric_name) - if value is not None: - return value + if metric_name not in df["Metric"].values: + logging.info(f"Metric name: {metric_name} not in CSV file: {df['Metric'].values}") + return METRIC_ERROR - return METRIC_ERROR + return float(df[df["Metric"] == metric_name][metric_type].values[0]) def get_metric(self, metric: str) -> float: - if metric not in self.metrics: + logging.info(f"Getting metric: {metric}") + benchmark_name = "genai_perf" + metric_name = metric + metric_type = "avg" + + if ":" in metric: + benchmark_name, metric_name, metric_type = metric.split(":") + + source_csv = self.test_run.output_path / 
f"{benchmark_name}_report.csv" + logging.info(f"CSV file: {source_csv}") + if not source_csv.exists() or source_csv.stat().st_size == 0: + logging.info(f"CSV file: {source_csv} does not exist or is empty") return METRIC_ERROR - metric_mapping = { - "default": "Output Token Throughput (tokens/sec)", - "output-token-throughput": "Output Token Throughput (tokens/sec)", - "request-throughput": "Request Throughput (per sec)", - "time-to-first-token": "Time To First Token (ms)", - "time-to-second-token": "Time To Second Token (ms)", - "request-latency": "Request Latency (ms)", - "inter-token-latency": "Inter Token Latency (ms)", - } - - mapped_metric = metric_mapping.get(metric) - if not mapped_metric: - return METRIC_ERROR + return self.extract_metric_from_csv(source_csv, metric_name, metric_type) - return self._read_metric_from_csv(mapped_metric) - - def _calculate_total_gpus(self) -> int | None: - gpus_per_node = None - if isinstance(self.system, (SlurmSystem, KubernetesSystem)): - gpus_per_node = self.system.gpus_per_node - - if gpus_per_node is None: - return None - - tdef = cast("AIDynamoTestDefinition", self.test_run.test) - - num_frontend_nodes = 1 - num_prefill_nodes = ( - cast(int, tdef.cmd_args.dynamo.prefill_worker.num_nodes) if tdef.cmd_args.dynamo.prefill_worker else 0 - ) - num_decode_nodes = cast(int, tdef.cmd_args.dynamo.decode_worker.num_nodes) - return (num_frontend_nodes + num_prefill_nodes + num_decode_nodes) * gpus_per_node - - def _read_csv_sections(self, source_csv: Path) -> list[list[list[str]]]: - sections = [] - current_section = [] - - with open(source_csv, "r") as f: - csv_reader = csv.reader(f) - for row in csv_reader: - if not any(row): # Empty row indicates section break - if current_section: - sections.append(current_section) - current_section = [] - else: - current_section.append(row) - if current_section: - sections.append(current_section) - - return sections - - def _write_sections_with_metric( - self, target_csv: Path, sections: list[list[list[str]]], total_gpus: int | None - ) -> None: - with open(target_csv, "w", newline="") as f: - writer = csv.writer(f) - - # Write first section (statistical metrics) - if sections: - for row in sections[0]: - writer.writerow(row) - writer.writerow([]) # Empty row for section break - - # Write second section with additional metric if total_gpus is available - if len(sections) > 1: - for row in sections[1]: - writer.writerow(row) - if total_gpus and row and row[0] == "Output Token Throughput (tokens/sec)": - throughput = float(row[1].replace(",", "")) - per_gpu_throughput = throughput / total_gpus - writer.writerow(["Overall Output Tokens per Second per GPU", per_gpu_throughput]) - writer.writerow([]) # Empty row for section break - - # Write remaining sections - for section in sections[2:]: - for row in section: - writer.writerow(row) - writer.writerow([]) # Empty row for section break + def can_handle_directory(self) -> bool: + return True def generate_report(self) -> None: - output_path = self.test_run.output_path - source_csv = next(output_path.rglob(CSV_FILES_PATTERN)) - target_csv = output_path / "report.csv" - - total_gpus = self._calculate_total_gpus() - if total_gpus is None: - logging.warning("gpus_per_node is None, skipping Overall Output Tokens per Second per GPU calculation.") - shutil.copy2(source_csv, target_csv) - return - - sections = self._read_csv_sections(source_csv) - self._write_sections_with_metric(target_csv, sections, total_gpus) + pass diff --git 
a/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py b/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py index a0f21f331..995b02bc5 100644 --- a/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py +++ b/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -# Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,9 +14,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from pathlib import Path +import logging +from pathlib import Path, PosixPath from typing import List, cast +from cloudai.core import GitRepo, File from cloudai.systems.slurm import SlurmCommandGenStrategy from .ai_dynamo import AIDynamoTestDefinition, BaseModel @@ -28,31 +30,7 @@ class AIDynamoSlurmCommandGenStrategy(SlurmCommandGenStrategy): def _container_mounts(self) -> list[str]: td = cast(AIDynamoTestDefinition, self.test_run.test) - dynamo_repo_path = td.dynamo_repo.installed_path - if dynamo_repo_path is None: - raise ValueError("dynamo_repo_path is not set - repo may not be installed") - dynamo_repo_path = dynamo_repo_path.absolute() - - mounts = [ - f"{dynamo_repo_path}:{dynamo_repo_path}", - f"{self.system.hf_home_path.absolute()}:{td.cmd_args.huggingface_home_container_path}", - f"{td.script.installed_path.absolute()!s}:{td.script.installed_path.absolute()!s}", - ] - - if td.cmd_args.dynamo.backend == "sglang": - deepep_path = ( - dynamo_repo_path / "components/backends/sglang/configs/deepseek_r1/wideep/deepep.json" - ).absolute() - sgl_http_server_path = ( - dynamo_repo_path / "components/backends/sglang/src/dynamo/sglang/utils/sgl_http_server.py" - ).absolute() - mounts.extend( - [ - f"{deepep_path!s}:{deepep_path!s}", - f"{sgl_http_server_path!s}:{sgl_http_server_path!s}", - ] - ) - return mounts + return [f"{self.system.hf_home_path.absolute()}:{td.cmd_args.dynamo.workspace_path}/hf_home"] def image_path(self) -> str | None: tdef: AIDynamoTestDefinition = cast(AIDynamoTestDefinition, self.test_run.test) @@ -64,14 +42,35 @@ def _get_toml_args(self, base_model: BaseModel, prefix: str, exclude: List[str] args = [] exclude = exclude or [] toml_args = base_model.model_dump(by_alias=True, exclude=set(exclude), exclude_none=True) - args = [f'{prefix}{k} "{v}"' for k, v in toml_args.items()] + for k, v in toml_args.items(): + if isinstance(v, dict): + if "url" in v and "commit" in v and "installed_path" in v and "mount_as" in v: + args.extend([f'{prefix}{k} "{v["mount_as"]}"']) + elif "src" in v and isinstance(v["src"], PosixPath): + args.extend([f'{prefix}{k} "{v["src"].name}"']) + else: + args.append(f'{prefix}{k} "{v}"') + else: + args.append(f'{prefix}{k} "{v}"') return args + def _get_nested_toml_args(self, base_model: BaseModel, prefix: str) -> List[str]: + result = self._get_toml_args(base_model, prefix, exclude=["args"]) + + if hasattr(base_model, "args"): + args = getattr(base_model, "args") + result.extend(self._get_toml_args(base_model.args, prefix + "args-")) + + return result + def _gen_script_args(self, td: AIDynamoTestDefinition) -> List[str]: args = [ - f"--huggingface-home {td.cmd_args.huggingface_home_container_path}", - "--results-dir /cloudai_run_results", + "--user $USER", + f"--install-dir 
{self.container_install_path}", + f"--huggingface-home {td.cmd_args.dynamo.workspace_path}/hf_home", + f"--results-dir {self.container_results_path}", + f"--dynamo-repo {td.dynamo_repo.container_mount}", ] args.extend( self._get_toml_args( @@ -80,31 +79,17 @@ def _gen_script_args(self, td: AIDynamoTestDefinition) -> List[str]: exclude=[ "prefill_worker", "decode_worker", - "genai_perf", - "workspace_path", - "decode_cmd", - "prefill_cmd", ], ) ) # Add backend-specific args if td.cmd_args.dynamo.backend == "sglang": - dynamo_repo_path = td.dynamo_repo.installed_path - if dynamo_repo_path is None: - raise ValueError("dynamo_repo_path is not set - repo may not be installed") - - deepep_path = getattr(td.cmd_args.dynamo, "deepep_path", None) - if not deepep_path: - deepep_path = ( - dynamo_repo_path / "components/backends/sglang/configs/deepseek_r1/wideep/deepep.json" - ).absolute() - else: - deepep_path = Path(deepep_path).absolute() - + dynamo_repo_path = td.dynamo_repo.container_mount + deepep_path = f"{dynamo_repo_path}/components/backends/sglang/configs/deepseek_r1/wideep/deepep.json" sgl_http_server_path = ( - dynamo_repo_path / "components/backends/sglang/src/dynamo/sglang/utils/sgl_http_server.py" - ).absolute() + f"{dynamo_repo_path}/components/backends/sglang/src/dynamo/sglang/utils/sgl_http_server.py" + ) args.extend( [ @@ -114,9 +99,13 @@ def _gen_script_args(self, td: AIDynamoTestDefinition) -> List[str]: ) if td.cmd_args.dynamo.prefill_worker: - args.extend(self._get_toml_args(td.cmd_args.dynamo.prefill_worker, "--prefill-")) - args.extend(self._get_toml_args(td.cmd_args.dynamo.decode_worker, "--decode-")) - args.extend(self._get_toml_args(td.cmd_args.genai_perf, "--genai-perf-")) + args.extend(self._get_nested_toml_args(td.cmd_args.dynamo.prefill_worker, "--prefill-")) + args.extend(self._get_nested_toml_args(td.cmd_args.dynamo.decode_worker, "--decode-")) + + args.extend(self._get_nested_toml_args(td.cmd_args.lmcache, "--lmcache-")) + args.extend(self._get_nested_toml_args(td.cmd_args.genai_perf, "--genai_perf-")) + args.extend(self._get_nested_toml_args(td.cmd_args.lmbench, "--lmbench-")) + args.extend(self._get_nested_toml_args(td.cmd_args.custom_workload, "--custom_workload-")) return args @@ -124,9 +113,7 @@ def _gen_srun_command(self) -> str: td = cast(AIDynamoTestDefinition, self.test_run.test) num_nodes, node_list = self.get_cached_nodes_spec() - fatal_file_name = "fatal_error.marker" out_dir = self.test_run.output_path.absolute() - fatal_path = f"{out_dir}/{fatal_file_name}" srun_cmd = self.gen_srun_prefix() srun_cmd.extend( @@ -135,35 +122,15 @@ def _gen_srun_command(self) -> str: *([] if not node_list else [f"--nodelist={','.join(node_list)}"]), f"--ntasks={num_nodes}", "--ntasks-per-node=1", - f"--export=ALL,DYNAMO_FATAL_ERROR_FILE={fatal_file_name}", + "--export=ALL", f"--output={out_dir / 'node-%n-stdout.txt'}", f"--error={out_dir / 'node-%n-stderr.txt'}", "bash", - f"{td.script.installed_path.absolute()!s}", + f"{self.container_install_path}/{td.script.src.name}", ] ) srun_cmd.extend(self._gen_script_args(td)) - srun_line = " \\\n ".join(srun_cmd) - - wrapper = [ - "num_retries=${DYNAMO_NUM_RETRY_ON_FAILURE:-0}", - "for try in $(seq 0 $num_retries); do", - ' echo "Try $try of $num_retries"', - f" rm -f {fatal_path} 2>/dev/null || true", - f" {srun_line}", - f" if [ $try -eq $num_retries ] || [ ! -f {fatal_path} ]; then", - " break", - " fi", - ' echo "Fatal error detected. 
Archiving logs then retrying..."', - f" mkdir -p {out_dir}/error.$try", - f" mv {out_dir}/*.log {out_dir}/error.$try/ 2>/dev/null || true", - f" mv {out_dir}/node-*-stdout.txt {out_dir}/error.$try/ 2>/dev/null || true", - f" mv {out_dir}/node-*-stderr.txt {out_dir}/error.$try/ 2>/dev/null || true", - f" mv {fatal_path} {out_dir}/error.$try/ 2>/dev/null || true", - " sleep ${DYNAMO_RETRY_BACKOFF_SEC:-10}", - "done", - ] - return "\n".join(wrapper) + return " \\\n ".join(srun_cmd) + "\n" def _validate_worker_nodes( self, node_list: list[str], worker_nodes: str | None, num_nodes: int, worker_type: str @@ -209,8 +176,8 @@ def get_cached_nodes_spec(self) -> tuple[int, list[str]]: decode_n = td.cmd_args.dynamo.decode_worker.num_nodes decode_nodes = td.cmd_args.dynamo.decode_worker.nodes - assert isinstance(prefill_n, int), "prefill_worker.num_nodes must be an integer" - assert isinstance(decode_n, int), "decode_worker.num_nodes must be an integer" + assert isinstance(prefill_n, int), "dynamo.num_prefill_nodes must be an integer" + assert isinstance(decode_n, int), "dynamo.num_decode_nodes must be an integer" if prefill_nodes and decode_nodes: self.test_run.nodes = prefill_nodes.split(",") + decode_nodes.split(",") + self.test_run.nodes @@ -222,6 +189,10 @@ def get_cached_nodes_spec(self) -> tuple[int, list[str]]: total_nodes = prefill_n + decode_n + logging.info("Setting num_nodes from %d to %d", self.test_run.num_nodes, total_nodes) + + self.test_run.num_nodes = total_nodes + requested_nodes, node_list = self.system.get_nodes_by_spec(self.test_run.nnodes, self.test_run.nodes) if prefill_nodes or decode_nodes: diff --git a/tests/json_gen_strategy/test_ai_dynamo.py b/tests/json_gen_strategy/test_ai_dynamo.py index 16492a06e..135d773b2 100644 --- a/tests/json_gen_strategy/test_ai_dynamo.py +++ b/tests/json_gen_strategy/test_ai_dynamo.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -# Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -27,9 +27,12 @@ AIDynamoCmdArgs, AIDynamoKubernetesJsonGenStrategy, AIDynamoTestDefinition, - DecodeWorkerArgs, - GenAIPerfArgs, - PrefillWorkerArgs, + GenAIPerf, + LMBench, + LMCache, + LMCacheArgs, + WorkerBaseArgs, + WorkerConfig, ) @@ -42,16 +45,20 @@ def dynamo(request: Any) -> AIDynamoTestDefinition: cmd_args=AIDynamoCmdArgs( docker_image_url="nvcr.io/nvidia/ai-dynamo/vllm-runtime:0.6.1.post1", dynamo=AIDynamoArgs( - decode_worker=DecodeWorkerArgs( - num_nodes=2, data_parallel_size=1, tensor_parallel_size=1, extra_args="--extra-decode-arg v" + decode_worker=WorkerConfig( + num_nodes=2, + args=WorkerBaseArgs(data_parallel_size=1, tensor_parallel_size=1), + extra_args="--extra-decode-arg v", ) ), - genai_perf=GenAIPerfArgs(), + genai_perf=GenAIPerf(), + lmcache=LMCache(args=LMCacheArgs()), + lmbench=LMBench(), ), ) if request.param == "disagg": - dynamo.cmd_args.dynamo.prefill_worker = PrefillWorkerArgs( - num_nodes=3, tensor_parallel_size=1, extra_args="--extra-prefill-arg v" + dynamo.cmd_args.dynamo.prefill_worker = WorkerConfig( + num_nodes=3, args=WorkerBaseArgs(tensor_parallel_size=1), extra_args="--extra-prefill-arg v" ) return dynamo @@ -94,7 +101,7 @@ def test_gen_decode(json_gen: AIDynamoKubernetesJsonGenStrategy) -> None: assert decode.get("subComponentType") == "decode-worker" args.append("--is-decode-worker") - for arg, value in dynamo_args_dict(tdef.cmd_args.dynamo.decode_worker).items(): + for arg, value in dynamo_args_dict(tdef.cmd_args.dynamo.decode_worker.args).items(): args.extend([json_gen._to_dynamo_arg(arg), str(value)]) if tdef.cmd_args.dynamo.decode_worker.extra_args: args.append(f"{tdef.cmd_args.dynamo.decode_worker.extra_args}") @@ -139,7 +146,7 @@ def test_gen_prefill(json_gen: AIDynamoKubernetesJsonGenStrategy) -> None: assert prefill.get("subComponentType") == "prefill" args = ["--model", tdef.cmd_args.dynamo.model, "--is-prefill-worker"] - for arg, value in dynamo_args_dict(tdef.cmd_args.dynamo.prefill_worker).items(): + for arg, value in dynamo_args_dict(tdef.cmd_args.dynamo.prefill_worker.args).items(): args.extend([json_gen._to_dynamo_arg(arg), str(value)]) if tdef.cmd_args.dynamo.prefill_worker.extra_args: args.append(f"{tdef.cmd_args.dynamo.prefill_worker.extra_args}") diff --git a/tests/ref_data/ai-dynamo.sbatch b/tests/ref_data/ai-dynamo.sbatch index 61a6e1df3..6e8789656 100644 --- a/tests/ref_data/ai-dynamo.sbatch +++ b/tests/ref_data/ai-dynamo.sbatch @@ -10,51 +10,72 @@ export SLURM_JOB_MASTER_NODE=$(scontrol show hostname $SLURM_JOB_NODELIST | head -n 1) -srun --export=ALL --mpi=pmix -N2 --container-image=nvcr.io/nvidia/ai-dynamo:24.09 --container-mounts=__OUTPUT_DIR__/output:/cloudai_run_results,__OUTPUT_DIR__/install:/cloudai_install,__OUTPUT_DIR__/output,__INSTALL_DIR__:__INSTALL_DIR__,__INSTALL_DIR__/huggingface:/root/.cache/huggingface,__CLOUDAI_DIR__/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh:__CLOUDAI_DIR__/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh --output=__OUTPUT_DIR__/output/mapping-stdout.txt --error=__OUTPUT_DIR__/output/mapping-stderr.txt bash -c "echo \$(date): \$(hostname):node \${SLURM_NODEID}:rank \${SLURM_PROCID}." 
+srun --export=ALL --mpi=pmix -N2 --container-image=nvcr.io/nvidia/ai-dynamo:24.09 --container-mounts=__OUTPUT_DIR__/output:/cloudai_run_results,__OUTPUT_DIR__/install:/cloudai_install,__OUTPUT_DIR__/output,__INSTALL_DIR__:/git/dynamo,__INSTALL_DIR__/LMCache__ab8530993992db873869ba882320953582d94309:/git/LMCache,__INSTALL_DIR__/LMBenchmark__e1406623c5e88878cf2b7fbd64fe6c47f7dcb66f:/git/LMBenchmark,__INSTALL_DIR__/huggingface:/workspace/hf_home --output=__OUTPUT_DIR__/output/mapping-stdout.txt --error=__OUTPUT_DIR__/output/mapping-stderr.txt bash -c "echo \$(date): \$(hostname):node \${SLURM_NODEID}:rank \${SLURM_PROCID}." -srun --export=ALL --mpi=pmix -N2 --container-image=nvcr.io/nvidia/ai-dynamo:24.09 --container-mounts=__OUTPUT_DIR__/output:/cloudai_run_results,__OUTPUT_DIR__/install:/cloudai_install,__OUTPUT_DIR__/output,__INSTALL_DIR__:__INSTALL_DIR__,__INSTALL_DIR__/huggingface:/root/.cache/huggingface,__CLOUDAI_DIR__/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh:__CLOUDAI_DIR__/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh --ntasks=2 --ntasks-per-node=1 --output=__OUTPUT_DIR__/output/metadata/node-%N.toml --error=__OUTPUT_DIR__/output/metadata/nodes.err bash /cloudai_install/slurm-metadata.sh +srun --export=ALL --mpi=pmix -N2 --container-image=nvcr.io/nvidia/ai-dynamo:24.09 --container-mounts=__OUTPUT_DIR__/output:/cloudai_run_results,__OUTPUT_DIR__/install:/cloudai_install,__OUTPUT_DIR__/output,__INSTALL_DIR__:/git/dynamo,__INSTALL_DIR__/LMCache__ab8530993992db873869ba882320953582d94309:/git/LMCache,__INSTALL_DIR__/LMBenchmark__e1406623c5e88878cf2b7fbd64fe6c47f7dcb66f:/git/LMBenchmark,__INSTALL_DIR__/huggingface:/workspace/hf_home --ntasks=2 --ntasks-per-node=1 --output=__OUTPUT_DIR__/output/metadata/node-%N.toml --error=__OUTPUT_DIR__/output/metadata/nodes.err bash /cloudai_install/slurm-metadata.sh -num_retries=${DYNAMO_NUM_RETRY_ON_FAILURE:-0} -for try in $(seq 0 $num_retries); do - echo "Try $try of $num_retries" - rm -f __OUTPUT_DIR__/output/fatal_error.marker 2>/dev/null || true - srun \ +srun \ --export=ALL \ --mpi=pmix \ -N2 \ --container-image=nvcr.io/nvidia/ai-dynamo:24.09 \ - --container-mounts=__OUTPUT_DIR__/output:/cloudai_run_results,__OUTPUT_DIR__/install:/cloudai_install,__OUTPUT_DIR__/output,__INSTALL_DIR__:__INSTALL_DIR__,__INSTALL_DIR__/huggingface:/root/.cache/huggingface,__CLOUDAI_DIR__/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh:__CLOUDAI_DIR__/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh \ + --container-mounts=__OUTPUT_DIR__/output:/cloudai_run_results,__OUTPUT_DIR__/install:/cloudai_install,__OUTPUT_DIR__/output,__INSTALL_DIR__:/git/dynamo,__INSTALL_DIR__/LMCache__ab8530993992db873869ba882320953582d94309:/git/LMCache,__INSTALL_DIR__/LMBenchmark__e1406623c5e88878cf2b7fbd64fe6c47f7dcb66f:/git/LMBenchmark,__INSTALL_DIR__/huggingface:/workspace/hf_home \ --nodes=2 \ --ntasks=2 \ --ntasks-per-node=1 \ - --export=ALL,DYNAMO_FATAL_ERROR_FILE=fatal_error.marker \ + --export=ALL \ --output=__OUTPUT_DIR__/output/node-%n-stdout.txt \ --error=__OUTPUT_DIR__/output/node-%n-stderr.txt \ bash \ - __CLOUDAI_DIR__/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh \ - --huggingface-home /root/.cache/huggingface \ + /cloudai_install/ai_dynamo.sh \ + --user $USER \ + --install-dir /cloudai_install \ + --huggingface-home /workspace/hf_home \ --results-dir /cloudai_run_results \ + --dynamo-repo /git/dynamo \ --dynamo-model "model" \ --dynamo-backend "vllm" \ + --dynamo-workspace-path "/workspace" \ + --dynamo-decode-cmd "python3 -m dynamo.vllm" \ + --dynamo-prefill-cmd "python3 -m 
dynamo.vllm --is-prefill-worker" \ --prefill-num-nodes "1" \ --prefill-ServiceArgs "{'workers': 1, 'resources': {'gpu': '8'}}" \ --decode-num-nodes "1" \ --decode-ServiceArgs "{'workers': 1, 'resources': {'gpu': '8'}}" \ - --genai-perf-streaming "True" \ - --genai-perf-extra-inputs "{"temperature": 0.7, "max_tokens": 128}" \ - --genai-perf-output-tokens-mean "128" \ - --genai-perf-random-seed "42" \ - --genai-perf-request-count "100" \ - --genai-perf-synthetic-input-tokens-mean "550" \ - --genai-perf-warmup-request-count "10" - if [ $try -eq $num_retries ] || [ ! -f __OUTPUT_DIR__/output/fatal_error.marker ]; then - break - fi - echo "Fatal error detected. Archiving logs then retrying..." - mkdir -p __OUTPUT_DIR__/output/error.$try - mv __OUTPUT_DIR__/output/*.log __OUTPUT_DIR__/output/error.$try/ 2>/dev/null || true - mv __OUTPUT_DIR__/output/node-*-stdout.txt __OUTPUT_DIR__/output/error.$try/ 2>/dev/null || true - mv __OUTPUT_DIR__/output/node-*-stderr.txt __OUTPUT_DIR__/output/error.$try/ 2>/dev/null || true - mv __OUTPUT_DIR__/output/fatal_error.marker __OUTPUT_DIR__/output/error.$try/ 2>/dev/null || true - sleep ${DYNAMO_RETRY_BACKOFF_SEC:-10} -done \ No newline at end of file + --lmcache-controller_cmd "lmcache_controller --host localhost --port 9000 --monitor-port 9001" \ + --lmcache-repo "/git/LMCache" \ + --lmcache-args-chunk_size "256" \ + --lmcache-args-local_cpu "False" \ + --lmcache-args-nixl_buffer_size "10737418240" \ + --lmcache-args-nixl_buffer_device "cuda" \ + --lmcache-args-extra_config_enable_nixl_storage "True" \ + --lmcache-args-extra_config_nixl_backend "GDS_MT" \ + --lmcache-args-extra_config_nixl_file_pool_size "64" \ + --lmcache-args-extra_config_nixl_path "%CACHEDIR%" \ + --lmcache-args-enable_controller "True" \ + --lmcache-args-lmcache_instance_id "lmcache_default_instance" \ + --lmcache-args-controller_url "localhost:9001" \ + --lmcache-args-lmcache_worker_port "8788" \ + --lmcache-args-distributed_url "localhost:8789" \ + --genai_perf-name "genai_perf" \ + --genai_perf-cmd "genai-perf profile" \ + --genai_perf-script "genai_perf.sh" \ + --genai_perf-report-name "genai_perf_report.csv" \ + --genai_perf-streaming "True" \ + --genai_perf-extra-inputs "{"temperature": 0.7, "max_tokens": 128}" \ + --genai_perf-output-tokens-mean "128" \ + --genai_perf-random-seed "42" \ + --genai_perf-request-count "100" \ + --genai_perf-synthetic-input-tokens-mean "550" \ + --genai_perf-warmup-request-count "10" \ + --lmbench-name "lmbench" \ + --lmbench-cmd "python3 ./synthetic-multi-round-qa/multi-round-qa.py" \ + --lmbench-script "lmbench.sh" \ + --lmbench-report-name "lmbench_report.csv" \ + --lmbench-repo "/git/LMBenchmark" \ + --lmbench-extra_args "0.25 0.5 0.75 1.0 1.25 1.5 1.75 2.0" \ + --lmbench-key "lmbench" \ + --custom_workload-name "custom_workload" \ + --custom_workload-cmd "hostname" \ + --custom_workload-script "custom_workload.sh" \ + --custom_workload-report-name "custom_workload_report.csv" \ No newline at end of file diff --git a/tests/report_generation_strategy/test_ai_dynamo_report_generation_strategy.py b/tests/report_generation_strategy/test_ai_dynamo_report_generation_strategy.py index 2d2cc2cf4..28b08020d 100644 --- a/tests/report_generation_strategy/test_ai_dynamo_report_generation_strategy.py +++ b/tests/report_generation_strategy/test_ai_dynamo_report_generation_strategy.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -# Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+# Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -25,8 +25,12 @@ AIDynamoArgs, AIDynamoCmdArgs, AIDynamoTestDefinition, - GenAIPerfArgs, - PrefillWorkerArgs, + GenAIPerf, + LMBench, + LMCache, + LMCacheArgs, + WorkerBaseArgs, + WorkerConfig, ) from cloudai.workloads.ai_dynamo.report_generation_strategy import AIDynamoReportGenerationStrategy @@ -41,17 +45,6 @@ def get_csv_content() -> str: "Inter Token Latency (ms),12.34,23.45,34.56,45.67,56.78,67.89,78.90,89.01,90.12\n" "Output Sequence Length (tokens),101.01,202.02,303.03,404.04,505.05,606.06,707.07,808.08,909.09\n" "Input Sequence Length (tokens),123.45,234.56,345.67,456.78,567.89,678.90,789.01,890.12,901.23\n" - "\n" - "Metric,Value\n" - "Output Token Throughput (tokens/sec),24\n" - "Request Throughput (per sec),1.23\n" - "Request Count (count),40.00\n" - "\n" - "Metric,GPU,avg,min,max,p99,p95,p90,p75,p50,p25\n" - "GPU Power Usage (W),0,119.93,117.61,120.81,120.81,120.81,120.81,120.81,120.60,119.85\n" - "GPU Power Usage (W),1,120.50,120.49,120.52,120.52,120.52,120.52,120.52,120.50,120.49\n" - "GPU Memory Used (GB),0,84.11,82.41,84.68,84.68,84.68,84.68,84.68,84.67,84.11\n" - "GPU Memory Used (GB),1,82.44,82.44,82.44,82.44,82.44,82.44,82.44,82.44,82.44\n" ) @@ -63,13 +56,18 @@ def ai_dynamo_tr(tmp_path: Path) -> TestRun: test_template_name="t", cmd_args=AIDynamoCmdArgs( docker_image_url="http://url", - dynamo=AIDynamoArgs(prefill_worker=PrefillWorkerArgs()), - genai_perf=GenAIPerfArgs(), + dynamo=AIDynamoArgs(prefill_worker=WorkerConfig(args=WorkerBaseArgs())), + genai_perf=GenAIPerf(), + lmcache=LMCache(args=LMCacheArgs()), + lmbench=LMBench(), ), ) tr = TestRun(name="ai_dynamo", test=test, num_nodes=1, nodes=[], output_path=tmp_path) csv_content = get_csv_content() + # Create CSV file with the name expected by the new implementation + (tr.output_path / "genai_perf_report.csv").write_text(csv_content) + # Also create the file pattern expected by was_run_successful (tr.output_path / "profile_genai_perf.csv").write_text(csv_content) (tr.output_path / "profile_genai_perf.json").write_text("mock json content") @@ -88,54 +86,28 @@ def test_ai_dynamo_can_handle_directory(slurm_system: SlurmSystem, ai_dynamo_tr: def test_ai_dynamo_generate_report(slurm_system: SlurmSystem, ai_dynamo_tr: TestRun, csv_content: str) -> None: strategy = AIDynamoReportGenerationStrategy(slurm_system, ai_dynamo_tr) + # The new implementation does not generate a report file strategy.generate_report() - - report_file = ai_dynamo_tr.output_path / "report.csv" - assert report_file.is_file(), "Report CSV was not generated." 
- - report_content = report_file.read_text() - - def split_into_sections(content: str) -> list[str]: - sections = content.split("\n\n") - return [s.strip() for s in sections if s.strip()] - - def normalize_csv_section(section: str) -> str: - return section.replace('"', "").strip() - - actual_sections = [normalize_csv_section(s) for s in split_into_sections(report_content)] - expected_sections = [normalize_csv_section(s) for s in split_into_sections(csv_content)] - - # First section should match after normalization - assert actual_sections[0] == expected_sections[0], "First section (metrics) does not match" - - # Second section should have our additional metric - second_section_lines = actual_sections[1].split("\n") - assert second_section_lines[0] == "Metric,Value", "Second section header does not match" - assert second_section_lines[1] == "Output Token Throughput (tokens/sec),24", "Throughput line does not match" - assert second_section_lines[2] == "Overall Output Tokens per Second per GPU,1.0", "Added metric line is incorrect" - assert second_section_lines[3:] == ["Request Throughput (per sec),1.23", "Request Count (count),40.00"], ( - "Remaining lines do not match" - ) - - # Third section (GPU metrics) should be identical - assert actual_sections[2] == expected_sections[2], "Third section (GPU metrics) does not match" + # Just verify the method runs without error + assert True def test_ai_dynamo_get_metric_single_values(slurm_system: SlurmSystem, ai_dynamo_tr: TestRun) -> None: strategy = AIDynamoReportGenerationStrategy(slurm_system, ai_dynamo_tr) - assert strategy.get_metric("output-token-throughput") == 24.0 - assert strategy.get_metric("request-throughput") == 1.23 - assert strategy.get_metric("default") == 24.0 + # Test that metrics from the first CSV section work + assert strategy.get_metric("Output Sequence Length (tokens)") == 101.01 + assert strategy.get_metric("Input Sequence Length (tokens)") == 123.45 def test_ai_dynamo_get_metric_statistical_values(slurm_system: SlurmSystem, ai_dynamo_tr: TestRun) -> None: strategy = AIDynamoReportGenerationStrategy(slurm_system, ai_dynamo_tr) - assert strategy.get_metric("time-to-first-token") == 111.12 - assert strategy.get_metric("time-to-second-token") == 11.13 - assert strategy.get_metric("request-latency") == 1111.14 - assert strategy.get_metric("inter-token-latency") == 12.34 + # Use exact metric names from CSV (with avg column, which is default) + assert strategy.get_metric("Time To First Token (ms)") == 111.12 + assert strategy.get_metric("Time To Second Token (ms)") == 11.13 + assert strategy.get_metric("Request Latency (ms)") == 1111.14 + assert strategy.get_metric("Inter Token Latency (ms)") == 12.34 def test_ai_dynamo_get_metric_invalid(slurm_system: SlurmSystem, ai_dynamo_tr: TestRun) -> None: @@ -143,8 +115,9 @@ def test_ai_dynamo_get_metric_invalid(slurm_system: SlurmSystem, ai_dynamo_tr: T assert strategy.get_metric("invalid-metric") == METRIC_ERROR - (ai_dynamo_tr.output_path / "profile_genai_perf.csv").write_text("") - assert strategy.get_metric("default") == METRIC_ERROR + # Empty the CSV file to test error handling + (ai_dynamo_tr.output_path / "genai_perf-report.csv").write_text("") + assert strategy.get_metric("invalid-metric") == METRIC_ERROR def test_was_run_successful(ai_dynamo_tr: TestRun) -> None: diff --git a/tests/slurm_command_gen_strategy/test_ai_dynamo_slurm_command_gen_strategy.py b/tests/slurm_command_gen_strategy/test_ai_dynamo_slurm_command_gen_strategy.py index 76df711b2..35c5a90f6 100644 --- 
a/tests/slurm_command_gen_strategy/test_ai_dynamo_slurm_command_gen_strategy.py +++ b/tests/slurm_command_gen_strategy/test_ai_dynamo_slurm_command_gen_strategy.py @@ -1,5 +1,5 @@ # SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES -# Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -26,9 +26,12 @@ AIDynamoCmdArgs, AIDynamoSlurmCommandGenStrategy, AIDynamoTestDefinition, - DecodeWorkerArgs, - GenAIPerfArgs, - PrefillWorkerArgs, + GenAIPerf, + LMBench, + LMCache, + LMCacheArgs, + WorkerBaseArgs, + WorkerConfig, ) @@ -36,28 +39,35 @@ def cmd_args() -> AIDynamoCmdArgs: return AIDynamoCmdArgs( docker_image_url="url", - huggingface_home_container_path=Path("/root/.cache/huggingface"), dynamo=AIDynamoArgs( model="model", workspace_path="/workspace", - prefill_worker=PrefillWorkerArgs( + prefill_worker=WorkerConfig( **{ "num-nodes": 1, - "gpu-memory-utilization": 0.95, - "tensor-parallel-size": 8, + "args": WorkerBaseArgs( + **{ + "gpu-memory-utilization": 0.95, + "tensor-parallel-size": 8, + } + ), "ServiceArgs": {"workers": 1, "resources": {"gpu": "8"}}, } ), - decode_worker=DecodeWorkerArgs( + decode_worker=WorkerConfig( **{ "num-nodes": 1, - "gpu-memory-utilization": 0.95, - "tensor-parallel-size": 8, + "args": WorkerBaseArgs( + **{ + "gpu-memory-utilization": 0.95, + "tensor-parallel-size": 8, + } + ), "ServiceArgs": {"workers": 1, "resources": {"gpu": "8"}}, } ), ), - genai_perf=GenAIPerfArgs( + genai_perf=GenAIPerf( **{ "endpoint-type": "chat", "streaming": True, @@ -72,6 +82,8 @@ def cmd_args() -> AIDynamoCmdArgs: "request-count": 10, } ), + lmcache=LMCache(args=LMCacheArgs()), + lmbench=LMBench(), ) @@ -99,13 +111,10 @@ def strategy(slurm_system: SlurmSystem, test_run: TestRun) -> AIDynamoSlurmComma def test_container_mounts(strategy: AIDynamoSlurmCommandGenStrategy, test_run: TestRun) -> None: mounts = strategy._container_mounts() td = cast(AIDynamoTestDefinition, test_run.test) - dynamo_repo_path = td.dynamo_repo.installed_path - assert dynamo_repo_path is not None, "dynamo_repo_path should be set in the test fixture" + # _container_mounts returns custom mounts including scripts and HF home (git repos are handled by base class) assert mounts == [ - f"{dynamo_repo_path!s}:{dynamo_repo_path!s}", - f"{strategy.system.hf_home_path.absolute()!s}:{td.cmd_args.huggingface_home_container_path!s}", - f"{td.script.installed_path.absolute()!s}:{td.script.installed_path.absolute()!s}", + f"{strategy.system.hf_home_path.absolute()!s}:{td.cmd_args.dynamo.workspace_path}/hf_home", ] diff --git a/tests/test_acceptance.py b/tests/test_acceptance.py index df3bbc06b..21cfde7ac 100644 --- a/tests/test_acceptance.py +++ b/tests/test_acceptance.py @@ -33,9 +33,12 @@ AIDynamoArgs, AIDynamoCmdArgs, AIDynamoTestDefinition, - DecodeWorkerArgs, - GenAIPerfArgs, - PrefillWorkerArgs, + GenAIPerf, + LMBench, + LMCache, + LMCacheArgs, + WorkerBaseArgs, + WorkerConfig, ) from cloudai.workloads.ddlb import DDLBCmdArgs, DDLBTestDefinition from cloudai.workloads.deepep import ( @@ -446,6 +449,7 @@ def test_req(request, slurm_system: SlurmSystem, partial_tr: partial[TestRun]) - url="https://github.com/ai-dynamo/dynamo.git", commit="f7e468c7e8ff0d1426db987564e60572167e8464", installed_path=slurm_system.install_path, + mount_as="/git/dynamo", ), cmd_args=AIDynamoCmdArgs( 
docker_image_url="nvcr.io/nvidia/ai-dynamo:24.09", @@ -453,20 +457,22 @@ def test_req(request, slurm_system: SlurmSystem, partial_tr: partial[TestRun]) - model="model", backend="vllm", workspace_path="/workspace", - prefill_worker=PrefillWorkerArgs( + prefill_worker=WorkerConfig( **{ "num-nodes": 1, + "args": WorkerBaseArgs(), "ServiceArgs": {"workers": 1, "resources": {"gpu": "8"}}, } ), - decode_worker=DecodeWorkerArgs( + decode_worker=WorkerConfig( **{ "num-nodes": 1, + "args": WorkerBaseArgs(), "ServiceArgs": {"workers": 1, "resources": {"gpu": "8"}}, } ), ), - genai_perf=GenAIPerfArgs( + genai_perf=GenAIPerf( **{ "streaming": True, "extra-inputs": '{"temperature": 0.7, "max_tokens": 128}', @@ -477,6 +483,8 @@ def test_req(request, slurm_system: SlurmSystem, partial_tr: partial[TestRun]) - "warmup-request-count": 10, } ), + lmcache=LMCache(args=LMCacheArgs()), + lmbench=LMBench(), ), ), ), diff --git a/tests/test_calc_percentile_csv.py b/tests/test_calc_percentile_csv.py new file mode 100644 index 000000000..f67b2f798 --- /dev/null +++ b/tests/test_calc_percentile_csv.py @@ -0,0 +1,92 @@ +# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +# Copyright (c) 2025-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import math + +import pytest + +from cloudai.workloads.ai_dynamo.calc_percentile_csv import compute_percentile, parse_float_safe, summarize + + +def test_compute_percentile_empty(): + assert math.isnan(compute_percentile([], 50)) + + +def test_compute_percentile_single_value(): + assert compute_percentile([5.0], 50) == 5.0 + assert compute_percentile([5.0], 0) == 5.0 + assert compute_percentile([5.0], 100) == 5.0 + + +def test_compute_percentile_multiple_values(): + values = [1.0, 2.0, 3.0, 4.0, 5.0] + assert compute_percentile(values, 0) == 1.0 + assert compute_percentile(values, 50) == 3.0 + assert compute_percentile(values, 100) == 5.0 + + +def test_compute_percentile_interpolation(): + values = [1.0, 2.0, 3.0, 4.0] + # Should interpolate between values + result = compute_percentile(values, 50) + assert 2.0 <= result <= 3.0 + + +def test_parse_float_safe_valid(): + assert parse_float_safe("3.14") == 3.14 + assert parse_float_safe(42) == 42.0 + assert parse_float_safe(3.14) == 3.14 + + +def test_parse_float_safe_invalid(): + assert math.isnan(parse_float_safe("invalid")) + assert math.isnan(parse_float_safe(None)) + assert math.isnan(parse_float_safe("")) + + +def test_summarize_empty(): + result = summarize([]) + assert math.isnan(result["avg"]) + assert math.isnan(result["min"]) + assert math.isnan(result["max"]) + assert math.isnan(result["p50"]) + + +def test_summarize_single_value(): + result = summarize([10.0]) + assert result["avg"] == 10.0 + assert result["min"] == 10.0 + assert result["max"] == 10.0 + assert result["p50"] == 10.0 + + +def test_summarize_multiple_values(): + values = [1.0, 2.0, 3.0, 4.0, 5.0] + result = summarize(values) + assert result["avg"] == 3.0 + assert result["min"] == 1.0 + assert result["max"] == 5.0 + assert result["p50"] == 3.0 + assert result["p25"] == 2.0 + assert result["p75"] == 4.0 + + +def test_summarize_percentiles(): + values = [float(x) for x in range(1, 101)] # 1 to 100 + result = summarize(values) + assert result["p1"] == pytest.approx(1.99, abs=0.1) + assert result["p99"] == pytest.approx(99.01, abs=0.1) + assert result["p50"] == pytest.approx(50.5, abs=0.1) From aa41405e0bbfd91b9a6adf630ec24795f8c38c99 Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Sat, 31 Jan 2026 20:23:01 -0800 Subject: [PATCH 02/22] Fix CI validation issues for AI Dynamo - Remove unused imports (field_serializer, GitRepo, File) - Fix code formatting (ruff format) - Fix TOML indentation (taplo fmt) - Fix pyright type error in _get_nested_toml_args - Fix GitRepo dict check to not require installed_path - Update test reference file for LMBench field changes Co-Authored-By: Claude Opus 4.5 --- conf/experimental/ai_dynamo/test/vllm.toml | 12 +++++----- .../ai_dynamo/test_scenario/vllm_k8s.toml | 8 +++---- .../ai_dynamo/test_scenario/vllm_slurm.toml | 24 +++++++++---------- src/cloudai/workloads/ai_dynamo/ai_dynamo.py | 1 - .../workloads/ai_dynamo/openai_chat_client.py | 4 ++-- .../ai_dynamo/slurm_command_gen_strategy.py | 8 +++---- tests/ref_data/ai-dynamo.sbatch | 3 +-- 7 files changed, 28 insertions(+), 32 deletions(-) diff --git a/conf/experimental/ai_dynamo/test/vllm.toml b/conf/experimental/ai_dynamo/test/vllm.toml index 56db3654f..6708d54be 100644 --- a/conf/experimental/ai_dynamo/test/vllm.toml +++ b/conf/experimental/ai_dynamo/test/vllm.toml @@ -31,12 +31,12 @@ docker_image_url = "nvcr.io/nvidia/ai-dynamo/vllm-runtime:0.7.0" [cmd_args.dynamo.decode_worker] num-nodes = 1 - [cmd_args.dynamo.decode_worker.args] - model = "Qwen/Qwen3-0.6B" - gpu-memory-utilization = 
0.95 - tensor-parallel-size = 8 - pipeline-parallel-size = 1 - data-parallel-size = 1 + [cmd_args.dynamo.decode_worker.args] + model = "Qwen/Qwen3-0.6B" + gpu-memory-utilization = 0.95 + tensor-parallel-size = 8 + pipeline-parallel-size = 1 + data-parallel-size = 1 [cmd_args.genai_perf] model = "Qwen/Qwen3-0.6B" diff --git a/conf/experimental/ai_dynamo/test_scenario/vllm_k8s.toml b/conf/experimental/ai_dynamo/test_scenario/vllm_k8s.toml index f557bac39..c8fdcdad6 100644 --- a/conf/experimental/ai_dynamo/test_scenario/vllm_k8s.toml +++ b/conf/experimental/ai_dynamo/test_scenario/vllm_k8s.toml @@ -24,10 +24,10 @@ test_name = "vLLM-Qwen3-0.6B" [Tests.cmd_args.dynamo] [Tests.cmd_args.dynamo.prefill_worker] num-nodes = 1 - [Tests.cmd_args.dynamo.prefill_worker.args] - tensor-parallel-size = 8 + [Tests.cmd_args.dynamo.prefill_worker.args] + tensor-parallel-size = 8 [Tests.cmd_args.dynamo.decode_worker] num-nodes = 1 - [Tests.cmd_args.dynamo.decode_worker.args] - tensor-parallel-size = 8 + [Tests.cmd_args.dynamo.decode_worker.args] + tensor-parallel-size = 8 diff --git a/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml b/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml index e1062c3bb..93f08dd4b 100644 --- a/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml +++ b/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml @@ -25,15 +25,15 @@ time_limit = "00:10:00" [Tests.cmd_args.dynamo.prefill_worker] num-nodes = 1 - [Tests.cmd_args.dynamo.prefill_worker.args] - tensor-parallel-size = 4 - pipeline-parallel-size = 1 + [Tests.cmd_args.dynamo.prefill_worker.args] + tensor-parallel-size = 4 + pipeline-parallel-size = 1 [Tests.cmd_args.dynamo.decode_worker] num-nodes = 1 - [Tests.cmd_args.dynamo.decode_worker.args] - tensor-parallel-size = 4 - pipeline-parallel-size = 1 + [Tests.cmd_args.dynamo.decode_worker.args] + tensor-parallel-size = 4 + pipeline-parallel-size = 1 [[Tests]] id = "test.disagg.multinode" @@ -43,12 +43,12 @@ time_limit = "00:10:00" [Tests.cmd_args.dynamo.prefill_worker] num-nodes = 2 - [Tests.cmd_args.dynamo.prefill_worker.args] - tensor-parallel-size = 4 - pipeline-parallel-size = 1 + [Tests.cmd_args.dynamo.prefill_worker.args] + tensor-parallel-size = 4 + pipeline-parallel-size = 1 [Tests.cmd_args.dynamo.decode_worker] num-nodes = 2 - [Tests.cmd_args.dynamo.decode_worker.args] - tensor-parallel-size = 4 - pipeline-parallel-size = 1 + [Tests.cmd_args.dynamo.decode_worker.args] + tensor-parallel-size = 4 + pipeline-parallel-size = 1 diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py index ed642f372..c3993798f 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py @@ -23,7 +23,6 @@ BaseModel, ConfigDict, Field, - field_serializer, model_validator, ) diff --git a/src/cloudai/workloads/ai_dynamo/openai_chat_client.py b/src/cloudai/workloads/ai_dynamo/openai_chat_client.py index 50a350667..aa86b649f 100644 --- a/src/cloudai/workloads/ai_dynamo/openai_chat_client.py +++ b/src/cloudai/workloads/ai_dynamo/openai_chat_client.py @@ -158,7 +158,7 @@ def parse_args() -> argparse.Namespace: def main() -> None: args = parse_args() - result= { + result = { "isl": args.isl, "context_tokens": 0, "baseline_cached_tokens": 0, @@ -209,7 +209,7 @@ def main() -> None: out_path = Path(args.out) with out_path.open("a", encoding="utf-8") as f: if out_path.suffix == ".csv": - line=",".join(str(v) for v in result.values()) + line = ",".join(str(v) for v in result.values()) 
f.write(line + "\n") else: json.dump(result, f, indent=2) diff --git a/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py b/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py index 995b02bc5..699b84221 100644 --- a/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py +++ b/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py @@ -18,7 +18,6 @@ from pathlib import Path, PosixPath from typing import List, cast -from cloudai.core import GitRepo, File from cloudai.systems.slurm import SlurmCommandGenStrategy from .ai_dynamo import AIDynamoTestDefinition, BaseModel @@ -44,7 +43,7 @@ def _get_toml_args(self, base_model: BaseModel, prefix: str, exclude: List[str] toml_args = base_model.model_dump(by_alias=True, exclude=set(exclude), exclude_none=True) for k, v in toml_args.items(): if isinstance(v, dict): - if "url" in v and "commit" in v and "installed_path" in v and "mount_as" in v: + if "url" in v and "commit" in v and "mount_as" in v: args.extend([f'{prefix}{k} "{v["mount_as"]}"']) elif "src" in v and isinstance(v["src"], PosixPath): args.extend([f'{prefix}{k} "{v["src"].name}"']) @@ -58,9 +57,8 @@ def _get_toml_args(self, base_model: BaseModel, prefix: str, exclude: List[str] def _get_nested_toml_args(self, base_model: BaseModel, prefix: str) -> List[str]: result = self._get_toml_args(base_model, prefix, exclude=["args"]) - if hasattr(base_model, "args"): - args = getattr(base_model, "args") - result.extend(self._get_toml_args(base_model.args, prefix + "args-")) + if hasattr(base_model, "args") and (nested_args := getattr(base_model, "args", None)) is not None: + result.extend(self._get_toml_args(nested_args, prefix + "args-")) return result diff --git a/tests/ref_data/ai-dynamo.sbatch b/tests/ref_data/ai-dynamo.sbatch index 6e8789656..cfcf0ad09 100644 --- a/tests/ref_data/ai-dynamo.sbatch +++ b/tests/ref_data/ai-dynamo.sbatch @@ -73,8 +73,7 @@ srun \ --lmbench-script "lmbench.sh" \ --lmbench-report-name "lmbench_report.csv" \ --lmbench-repo "/git/LMBenchmark" \ - --lmbench-extra_args "0.25 0.5 0.75 1.0 1.25 1.5 1.75 2.0" \ - --lmbench-key "lmbench" \ + --lmbench-qps "0.25,0.5,0.75,1.0,1.25,1.5,1.75,2.0" \ --custom_workload-name "custom_workload" \ --custom_workload-cmd "hostname" \ --custom_workload-script "custom_workload.sh" \ From c68c14dc32a1bd276aaa6981bcc39bd699e424fe Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Sat, 31 Jan 2026 21:14:13 -0800 Subject: [PATCH 03/22] Fix clear_kv_cache and add explicit port configuration - Fix clear_kv_cache to use /clear_kv_blocks endpoint (not /reset_prefix_cache) - Add explicit port fields to AIDynamoArgs (port, etcd_port, nats_port) - Pass connector and kvbm_metrics_port from ai_dynamo.sh to workload scripts - Update custom_workload.sh to receive all port config via command line args - Only fetch KVBM metrics when connector is kvbm and port is specified Co-Authored-By: Claude Opus 4.5 --- src/cloudai/workloads/ai_dynamo/ai_dynamo.py | 14 +++++ src/cloudai/workloads/ai_dynamo/ai_dynamo.sh | 2 + .../workloads/ai_dynamo/custom_workload.sh | 55 +++++++++++++------ tests/ref_data/ai-dynamo.sbatch | 3 + 4 files changed, 57 insertions(+), 17 deletions(-) diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py index c3993798f..55adda7ca 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py @@ -144,6 +144,20 @@ class AIDynamoArgs(BaseModel): serialization_alias="workspace-path", 
validation_alias=AliasChoices("workspace-path", "workspace_path"), ) + port: int = Field( + default=8000, + description="Dynamo frontend HTTP API port", + ) + etcd_port: int = Field( + default=2379, + serialization_alias="etcd-port", + validation_alias=AliasChoices("etcd-port", "etcd_port"), + ) + nats_port: int = Field( + default=4222, + serialization_alias="nats-port", + validation_alias=AliasChoices("nats-port", "nats_port"), + ) decode_worker: WorkerConfig = Field(default_factory=WorkerConfig) decode_cmd: str = Field( default="python3 -m dynamo.vllm", diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh index dafc0592e..cd505ba8b 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh @@ -990,6 +990,8 @@ function launch_workload() --port '"${dynamo_args["port"]}"' \ --endpoint '"${dynamo_args["endpoint"]}"' \ --gpus_per_node '"$(_gpus_per_node)"' \ + --connector '"${dynamo_args["connector"]}"' \ + --kvbm_metrics_port '"${DYN_KVBM_METRICS_PORT:-6880}"' \ $expanded_config \ -- $expanded_arguments '>' ${RESULTS_DIR}/$workload_name.log '2>&1' diff --git a/src/cloudai/workloads/ai_dynamo/custom_workload.sh b/src/cloudai/workloads/ai_dynamo/custom_workload.sh index 503682332..701c9c4c8 100644 --- a/src/cloudai/workloads/ai_dynamo/custom_workload.sh +++ b/src/cloudai/workloads/ai_dynamo/custom_workload.sh @@ -10,6 +10,8 @@ model="" url="" port="" endpoint="" +connector="" +kvbm_metrics_port="" all_isl="" declare -A workload_args @@ -55,6 +57,14 @@ function process_args() endpoint="$2" shift 2 ;; + --connector) + connector="$2" + shift 2 + ;; + --kvbm_metrics_port) + kvbm_metrics_port="$2" + shift 2 + ;; --result_dir) result_dir="$2" shift 2 @@ -90,6 +100,8 @@ function process_args() url: $url port: $port endpoint: $endpoint + connector: $connector + kvbm_metrics_port: $kvbm_metrics_port result_dir: $result_dir install_dir: $install_dir report_name: $report_name @@ -114,30 +126,39 @@ function process_args() function clear_kv_cache() { - # Use default ports if env vars not set - local metrics_port="${DYN_METRICS_PORT:-${port}}" # Default to the main API port - local kvbm_metrics_port="${DYN_KVBM_METRICS_PORT:-6880}" # Default KVBM metrics port - - local dyn_metrics_endpoint="${url}:${metrics_port}" - local kvbm_metrics_endpoint="${url}:${kvbm_metrics_port}/metrics" - - # This clears G1 (GPU) + G2 (CPU) + G3 (Disk) at once - status=$(curl -s ${dyn_metrics_endpoint}/metrics 2>/dev/null | grep -E "kvstats_active_blocks|kvstats_total_blocks" || echo "metrics unavailable") - log "KV cache status before clear: $status" + # All ports are passed via command line arguments from ai_dynamo.sh + # - port: Dynamo frontend API port (for /clear_kv_blocks endpoint) + # - kvbm_metrics_port: KVBM metrics port (for cache hit rate metrics) + + if [[ -z "$port" ]]; then + log "ERROR: API port not specified, skipping KV cache clear" + return 1 + fi - response=$(curl -s -X POST ${dyn_metrics_endpoint}/reset_prefix_cache 2>/dev/null || echo "endpoint unavailable") - log "KV prefix cache reset. 
Response: $response" + local api_endpoint="${url}:${port}" + + # Check KV cache status before clearing (metrics on main API port) + status=$(curl -s ${api_endpoint}/metrics 2>/dev/null | grep -E "kvstats_active_blocks|kvstats_total_blocks" || echo "metrics unavailable") + log "KV cache status before clear: $status" - response=$(curl -s -X POST ${dyn_metrics_endpoint}/clear_kv_blocks 2>/dev/null || echo "endpoint unavailable") + # Clear KV blocks via the dynamo HTTP endpoint + # This internally calls reset_prefix_cache() on all workers + response=$(curl -s -X POST ${api_endpoint}/clear_kv_blocks 2>/dev/null || echo "endpoint unavailable") log "KV blocks cleared. Response: $response" - status=$(curl -s ${dyn_metrics_endpoint}/metrics 2>/dev/null | grep -E "kvstats_active_blocks|kvstats_total_blocks" || echo "metrics unavailable") + # Check KV cache status after clearing + status=$(curl -s ${api_endpoint}/metrics 2>/dev/null | grep -E "kvstats_active_blocks|kvstats_total_blocks" || echo "metrics unavailable") log "KV cache status after clear: $status" - status=$(curl -s ${kvbm_metrics_endpoint} | grep -E "host_cache_hit_rate|disk_cache_hit_rate") - log "KVBM cache hit rates after clear: $status" + # Check KVBM-specific metrics if connector is kvbm and port is specified + if [[ "$connector" == "kvbm" && -n "$kvbm_metrics_port" ]]; then + local kvbm_metrics_endpoint="${url}:${kvbm_metrics_port}/metrics" + status=$(curl -s ${kvbm_metrics_endpoint} 2>/dev/null | grep -E "host_cache_hit_rate|disk_cache_hit_rate" || echo "kvbm metrics unavailable") + log "KVBM cache hit rates after clear: $status" + fi - # if [[ "${dynamo_args["connector"]}" == "lmcache" ]]; then + # TODO: Add LMCache clearing when connector is lmcache + # if [[ "$connector" == "lmcache" ]]; then # clear_lmcache # fi } diff --git a/tests/ref_data/ai-dynamo.sbatch b/tests/ref_data/ai-dynamo.sbatch index cfcf0ad09..332a0b0ac 100644 --- a/tests/ref_data/ai-dynamo.sbatch +++ b/tests/ref_data/ai-dynamo.sbatch @@ -36,6 +36,9 @@ srun \ --dynamo-model "model" \ --dynamo-backend "vllm" \ --dynamo-workspace-path "/workspace" \ + --dynamo-port "8000" \ + --dynamo-etcd-port "2379" \ + --dynamo-nats-port "4222" \ --dynamo-decode-cmd "python3 -m dynamo.vllm" \ --dynamo-prefill-cmd "python3 -m dynamo.vllm --is-prefill-worker" \ --prefill-num-nodes "1" \ From 0d1782054e4e259d0d3096a48550e3a85a58b17f Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Mon, 2 Feb 2026 06:36:35 +0000 Subject: [PATCH 04/22] Fix binary search infinite loop in compute_kv_cache_token_size Signed-off-by: Kapil Arya --- .../workloads/ai_dynamo/openai_chat_client.py | 55 ++++++++++++++++--- 1 file changed, 48 insertions(+), 7 deletions(-) diff --git a/src/cloudai/workloads/ai_dynamo/openai_chat_client.py b/src/cloudai/workloads/ai_dynamo/openai_chat_client.py index aa86b649f..b51ddd8bb 100644 --- a/src/cloudai/workloads/ai_dynamo/openai_chat_client.py +++ b/src/cloudai/workloads/ai_dynamo/openai_chat_client.py @@ -33,7 +33,7 @@ from transformers import AutoTokenizer # type: ignore[import-untyped] # ---------------------------------------------------------------------- -FILLER_LEN_CHARS = 10_000 # ≈ length of each cache-filler prompt +NUM_FILLER_TOKENS = 10_000 # ≈ length of each cache-filler prompt NUM_FILLER_PROMPTS = 100 # how many fillers to send for eviction # ---------------------------------------------------------------------- @@ -123,7 +123,7 @@ def stream(self, client: OpenAI, max_tokens: int) -> TtftStats: def flush_kv_cache(args: argparse.Namespace, client: OpenAI) 
-> None: """Flush KV cache by sending filler prompts.""" for _ in range(args.num_filler_prompts): - _ = Chat(args.filler_len_chars, args.model, args.max_ctx_tokens).stream(client, 1) + _ = Chat(args.num_filler_tokens, args.model, args.max_ctx_tokens).stream(client, 1) # ---------------- command-line parsing -------------------------------- @@ -146,14 +146,48 @@ def parse_args() -> argparse.Namespace: help="Number of filler prompts to send for cache flush.", ) ap.add_argument( - "--filler_len_chars", + "--num_filler_tokens", type=int, - default=FILLER_LEN_CHARS, - help="Length of filler prompt in characters.", + default=NUM_FILLER_TOKENS, + help="Number of filler tokens.", ) + ap.add_argument("--compute_kv_cache_token_size", action="store_true", help="Compute KV cache token size and exit.") return ap.parse_args() +def SendFillerQueries(args: argparse.Namespace, client: OpenAI, num: int): + for _ in range(num): + _ = Chat(args.num_filler_tokens, args.model, args.max_ctx_tokens).stream(client, 1) + + +def compute_kv_cache_token_size(args: argparse.Namespace, client: OpenAI) -> int: + # We want to compute the number of tokens required to flush the KV cache. To + # do this, we start by sending a canary query with 1000 tokens. + # Next we send a filler queries with 10000 tokens and after each query we + # send the original query again aand measure the cached_tokens. If + # cached_tokens is not zero, we increase the number of filler queries and + # repeat. At some point, the cached_tokens for the original query will be + # zero and we have the number of filler queries required to flush the KV + # cache. + canary_chat = Chat(1000, args.model, args.max_ctx_tokens) + canary_stats = canary_chat.stream(client, 1) + print(f"Canary query: {canary_stats.ttft_seconds:.3f}s with {canary_stats.cached_tokens} cached tokens") + + # Do a binary search for the number of filler prompts required to flush the KV cache. 
+ maxFillerPrompts = 10_000 + minFillerPrompts = 1 + while minFillerPrompts < maxFillerPrompts: + numFillerPrompts = (maxFillerPrompts + minFillerPrompts) // 2 + SendFillerQueries(args, client, numFillerPrompts) + canary_stats = canary_chat.stream(client, 1) + print(f"Canary query: {canary_stats.ttft_seconds:.3f}s with {canary_stats.cached_tokens} cached tokens") + if canary_stats.cached_tokens < 500: + maxFillerPrompts = numFillerPrompts + else: + minFillerPrompts = numFillerPrompts + 1 + return minFillerPrompts * args.num_filler_tokens + + # ---------------- main routine ---------------------------------------- def main() -> None: args = parse_args() @@ -169,6 +203,15 @@ def main() -> None: "post_flush_ttft_seconds": 0, } + client = OpenAI(base_url=args.url, api_key="dummy-key-for-local-server") + + if args.compute_kv_cache_token_size: + kv_cache_token_size = compute_kv_cache_token_size(args, client); + print(f"KV cache token size: {kv_cache_token_size}") + with Path(args.out).open("a", encoding="utf-8") as f: + f.write(f"KV cache token size: {kv_cache_token_size}\n") + return + if args.dump_csv_header: with Path(args.out).open("a", encoding="utf-8") as f: f.write(",".join(result.keys())) @@ -177,8 +220,6 @@ def main() -> None: chat = Chat(args.isl, args.model, args.max_ctx_tokens) - client = OpenAI(base_url=args.url, api_key="dummy-key-for-local-server") - # ---------------- RUN 1 ---------------- print("\n=== Run 1: baseline TTFT ===") baseline = chat.stream(client, args.osl) From 0acad0bf1a5968215fbcf4ab8a345694826222fb Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Tue, 3 Feb 2026 22:57:30 +0200 Subject: [PATCH 05/22] Misc changes. --- src/cloudai/workloads/ai_dynamo/ai_dynamo.py | 31 ++++--- src/cloudai/workloads/ai_dynamo/ai_dynamo.sh | 24 +++--- .../workloads/ai_dynamo/custom_workload.sh | 24 +++++- .../workloads/ai_dynamo/openai_chat_client.py | 81 ++++++++++++------- .../ai_dynamo/report_generation_strategy.py | 4 - .../ai_dynamo/slurm_command_gen_strategy.py | 7 +- 6 files changed, 112 insertions(+), 59 deletions(-) diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py index 55adda7ca..d5a18d804 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py @@ -37,9 +37,6 @@ ) from cloudai.models.workload import CmdArgs, TestDefinition -from .report_generation_strategy import CSV_FILES_PATTERN, JSON_FILES_PATTERN - - class Args(BaseModel): """Arguments for custom workloads.""" @@ -264,7 +261,7 @@ class AIDynamoCmdArgs(CmdArgs): """Arguments for AI Dynamo.""" docker_image_url: str - storage_cache_dir: Optional[str] = None + storage_cache_dir: Optional[Path] = Field(default=None, serialization_alias="storage-cache-dir") num_nodes: int = 1 gpus_per_node: int = 8 dynamo: AIDynamoArgs @@ -291,6 +288,14 @@ class AIDynamoTestDefinition(TestDefinition): workloads: str = "genai_perf.sh,lmbench.sh,custom_workload.sh" constraints: Constraints = Constraints() + def get_workload_map(self) -> dict[str, Workload]: + """Get a map of workload scripts to workload objects.""" + return { + self.cmd_args.genai_perf.script.src.name: self.cmd_args.genai_perf, + self.cmd_args.lmbench.script.src.name: self.cmd_args.lmbench, + self.cmd_args.custom_workload.script.src.name: self.cmd_args.custom_workload, + } + @model_validator(mode="after") def validate_test_definition(self) -> "AIDynamoTestDefinition": """Validate test definition.""" @@ -346,13 +351,17 @@ def installables(self) -> list[Installable]: def 
was_run_successful(self, tr: TestRun) -> JobStatusResult: output_path = tr.output_path - csv_files = list(output_path.rglob(CSV_FILES_PATTERN)) - json_files = list(output_path.rglob(JSON_FILES_PATTERN)) - logging.debug(f"Found CSV files in {output_path.absolute()}: {csv_files}, JSON files: {json_files}") - has_results = len(csv_files) > 0 and len(json_files) > 0 - if not has_results: - return JobStatusResult(False, "No result files found in the output directory.") - return JobStatusResult(True) + result = True + workload_map = self.get_workload_map() + for workload in workload_map.keys(): + workload_csv_file = output_path / workload_map[workload].report_name + if not workload_csv_file.exists() or workload_csv_file.stat().st_size == 0: + logging.info(f"No result file found for workload: {workload}") + result = False + else: + logging.info(f"Result file ({workload_csv_file.absolute()}) exists and is not empty for workload: {workload}") + + return JobStatusResult(result) def constraint_check(self, tr: TestRun) -> bool: prefill_worker = tr.test.cmd_args.dynamo.prefill_worker diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh index cd505ba8b..e36b105da 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh @@ -703,6 +703,7 @@ function wait_for_frontend_marker() function mark_done() { + sleep 10000 touch "$DONE_MARKER" } @@ -901,9 +902,11 @@ function setup_storage_cache_dir() function setup_kvbm() { if [[ "${dynamo_args["connector"]}" != "kvbm" ]]; then + log "Connector is not kvbm. Skipping setup_kvbm" return fi + log "Setting up KVBM storage cache directory: ${STORAGE_CACHE_DIR}" setup_storage_cache_dir export DYN_KVBM_DISK_CACHE_DIR=${STORAGE_CACHE_DIR} setup_cufile @@ -912,11 +915,12 @@ function setup_kvbm() function setup_lmcache() { if [[ "${dynamo_args["connector"]}" != "lmcache" ]]; then + log "Connector is not lmcache. 
Skipping setup_lmcache" return fi + log "Setting up LMCache; installing LMCache using: uv pip install $lmcache_path" local lmcache_path="${lmcache_config["repo"]}" - log "Installing LMCache using: uv pip install $lmcache_path" uv pip install -e $lmcache_path setup_storage_cache_dir @@ -982,18 +986,18 @@ function launch_workload() log "Launching $workload_name with cmd: ${INSTALL_DIR}/$script $expanded_config -- $expanded_arguments" # Use eval to properly handle values with spaces in expanded_config - eval bash ${INSTALL_DIR}/$script \ + bash ${INSTALL_DIR}/$script \ --install_dir "$INSTALL_DIR" \ --result_dir "$RESULTS_DIR" \ - --model '"${dynamo_args["model"]}"' \ - --url '"http://${dynamo_args["frontend-node"]}"' \ - --port '"${dynamo_args["port"]}"' \ - --endpoint '"${dynamo_args["endpoint"]}"' \ - --gpus_per_node '"$(_gpus_per_node)"' \ - --connector '"${dynamo_args["connector"]}"' \ - --kvbm_metrics_port '"${DYN_KVBM_METRICS_PORT:-6880}"' \ + --model "${dynamo_args["model"]}" \ + --url "http://${dynamo_args["frontend-node"]}" \ + --port "${dynamo_args["port"]}" \ + --endpoint "${dynamo_args["endpoint"]}" \ + --gpus_per_node "$(_gpus_per_node)" \ + --connector "${dynamo_args["connector"]}" \ + --kvbm_metrics_port "${DYN_KVBM_METRICS_PORT:-6880}" \ $expanded_config \ - -- $expanded_arguments '>' ${RESULTS_DIR}/$workload_name.log '2>&1' + -- $expanded_arguments > ${RESULTS_DIR}/$workload_name.log 2>&1 log "Done with $workload_name run" } diff --git a/src/cloudai/workloads/ai_dynamo/custom_workload.sh b/src/cloudai/workloads/ai_dynamo/custom_workload.sh index 701c9c4c8..22eb4f4ea 100644 --- a/src/cloudai/workloads/ai_dynamo/custom_workload.sh +++ b/src/cloudai/workloads/ai_dynamo/custom_workload.sh @@ -168,11 +168,29 @@ function main() process_args "$@" report_file=$result_dir/$report_name + kv_cache_token_size_file=$result_dir/kv_cache_token_size.out + num_filler_prompts=0 + num_filler_tokens=32000 - log "Launching custom workload with ISLs: $all_isl" + log "Computing KV cache token size" + python3 ${install_dir}/openai_chat_client.py \ + --model $model \ + --url $url:$port/v1 \ + --osl 10 \ + --out $kv_cache_token_size_file \ + --compute_kv_cache_token_size \ + --num_filler_tokens $num_filler_tokens \ + --max_filler_prompts 200 \ + --min_filler_prompts 10 + + kv_cache_token_size=$(grep cache $kv_cache_token_size_file | cut -d':' -f 2 | tr -d ' ') + num_filler_prompts=$((kv_cache_token_size / num_filler_tokens)) + log "KV cache token size: $kv_cache_token_size, num filler prompts: $num_filler_prompts, num filler tokens: $num_filler_tokens" + log "Dumping CSV header" python3 ${install_dir}/openai_chat_client.py --dump_csv_header --out $report_file + log "Launching custom workload with ISLs: $all_isl" for isl in $(echo $all_isl | tr ',' '\n'); do log "Launching custom workload with ISL: $isl" python3 ${install_dir}/openai_chat_client.py \ @@ -181,8 +199,8 @@ function main() --isl $isl \ --osl 10 \ --out $report_file \ - --num_filler_prompts 100 \ - --filler_len_chars 10000 + --num_filler_prompts $num_filler_prompts \ + --num_filler_tokens $num_filler_tokens clear_kv_cache done diff --git a/src/cloudai/workloads/ai_dynamo/openai_chat_client.py b/src/cloudai/workloads/ai_dynamo/openai_chat_client.py index b51ddd8bb..6d45c1670 100644 --- a/src/cloudai/workloads/ai_dynamo/openai_chat_client.py +++ b/src/cloudai/workloads/ai_dynamo/openai_chat_client.py @@ -22,6 +22,7 @@ # Standard import argparse import json +import os import random import string import sys @@ -41,10 +42,10 @@ # 
---------------- helper utilities ------------------------------------ -def log_jsonl(path: Path, rec: dict) -> None: - with path.open("a", encoding="utf-8") as fh: - json.dump(rec, fh) - fh.write("\n") +def log(message: str) -> None: + print(f"[{time.strftime('%Y-%m-%d %H:%M:%S') } {os.getenv('HOSTNAME') or ''}]: [openai_chat_client] {message}") + sys.stdout.flush() + sys.stderr.flush() class TtftStats: @@ -67,8 +68,9 @@ def __init__(self, isl: int, model_id: str, max_ctx_tokens: int): raw_doc = "".join(random.choices(string.ascii_letters + string.digits, k=self.isl * 4)) - ids = self.tok.encode(raw_doc, add_special_tokens=False, truncation=True, max_length=self.isl) - assert len(ids) == self.isl, f"Expected {self.isl} tokens, got {len(ids)}" + num_tokens = self.isl - 37 + ids = self.tok.encode(raw_doc, add_special_tokens=False, truncation=True, max_length=num_tokens) + assert len(ids) == num_tokens, f"Expected {num_tokens} tokens, got {len(ids)}" doc = self.tok.decode(ids, skip_special_tokens=True) self.messages = [ @@ -93,6 +95,8 @@ def stream(self, client: OpenAI, max_tokens: int) -> TtftStats: first_tok_t: float | None = None for chunk in stream: + if first_tok_t is None and chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content: + first_tok_t = time.perf_counter() # Capture prompt_tokens from usage if available if chunk.usage and chunk.usage.prompt_tokens: stats.prompt_tokens = chunk.usage.prompt_tokens @@ -100,8 +104,6 @@ def stream(self, client: OpenAI, max_tokens: int) -> TtftStats: usage_details = chunk.usage and chunk.usage.prompt_tokens_details if usage_details and usage_details.cached_tokens is not None: stats.cached_tokens = usage_details.cached_tokens - if first_tok_t is None and chunk.choices and chunk.choices[0].delta and chunk.choices[0].delta.content: - first_tok_t = time.perf_counter() if first_tok_t is None: raise RuntimeError("no tokens returned") @@ -109,13 +111,13 @@ def stream(self, client: OpenAI, max_tokens: int) -> TtftStats: stats.ttft_seconds = round(first_tok_t - start, 3) return stats except json.JSONDecodeError as e: - print(f"Error: JSON decode error during streaming: {e}") - print("This may indicate empty SSE events from the server - likely a server-side bug") + log(f"Error: JSON decode error during streaming: {e}") + log("This may indicate empty SSE events from the server - likely a server-side bug") # Return partial stats with error indication stats.ttft_seconds = -1 # Indicate error return stats except Exception as e: - print(f"Error during streaming: {type(e).__name__}: {e}") + log(f"Error during streaming: {type(e).__name__}: {e}") stats.ttft_seconds = -1 # Indicate error return stats @@ -139,6 +141,8 @@ def parse_args() -> argparse.Namespace: ap.add_argument("--isl", type=int, help="Input tokens.") ap.add_argument("--osl", type=int, help="Output tokens.") ap.add_argument("--out", help="JSONL file for results.") + ap.add_argument("--max_filler_prompts", type=int, default=1000, help="Max number of filler prompts (used to compute the KV cache token size) to send for cache flush.") + ap.add_argument("--min_filler_prompts", type=int, default=10, help="Min number of filler prompts (used to compute the KV cache token size) to send for cache flush.") ap.add_argument( "--num_filler_prompts", type=int, @@ -169,23 +173,31 @@ def compute_kv_cache_token_size(args: argparse.Namespace, client: OpenAI) -> int # repeat. 
At some point, the cached_tokens for the original query will be # zero and we have the number of filler queries required to flush the KV # cache. + + # Do a binary search for the number of filler prompts required to flush the KV cache. + # We use multiple of 10 to speed up the search. + maxFillerPrompts = args.max_filler_prompts // 10 + minFillerPrompts = min(10, args.min_filler_prompts // 10) + log(f"Doing binary search for the number of filler prompts required to flush the KV cache between {minFillerPrompts} and {maxFillerPrompts}...") + + log(f"Sending an initialcanary query with 1000 tokens...") canary_chat = Chat(1000, args.model, args.max_ctx_tokens) canary_stats = canary_chat.stream(client, 1) - print(f"Canary query: {canary_stats.ttft_seconds:.3f}s with {canary_stats.cached_tokens} cached tokens") + log(f"Initial Canary query: {canary_stats.ttft_seconds:.3f}s with {canary_stats.cached_tokens} cached tokens") - # Do a binary search for the number of filler prompts required to flush the KV cache. - maxFillerPrompts = 10_000 - minFillerPrompts = 1 while minFillerPrompts < maxFillerPrompts: - numFillerPrompts = (maxFillerPrompts + minFillerPrompts) // 2 + numFillerPrompts = (maxFillerPrompts + minFillerPrompts) // 2 * 10 + log(f"Trying {numFillerPrompts} filler prompts...") SendFillerQueries(args, client, numFillerPrompts) + log(f"Sending canary query after {numFillerPrompts} filler prompts...") canary_stats = canary_chat.stream(client, 1) - print(f"Canary query: {canary_stats.ttft_seconds:.3f}s with {canary_stats.cached_tokens} cached tokens") + log(f"Canary query: {canary_stats.ttft_seconds:.3f}s with {canary_stats.cached_tokens} cached tokens") if canary_stats.cached_tokens < 500: - maxFillerPrompts = numFillerPrompts + maxFillerPrompts = numFillerPrompts // 10 else: - minFillerPrompts = numFillerPrompts + 1 - return minFillerPrompts * args.num_filler_tokens + minFillerPrompts = numFillerPrompts // 10 + 1 + log(f"Min filler prompts: {minFillerPrompts * 10}, Max filler prompts: {maxFillerPrompts * 10}") + return minFillerPrompts * 10 * args.num_filler_tokens # ---------------- main routine ---------------------------------------- @@ -194,11 +206,13 @@ def main() -> None: result = { "isl": args.isl, - "context_tokens": 0, + "baseline_prompt_tokens": 0, "baseline_cached_tokens": 0, "baseline_ttft_seconds": 0, + "no_flush_prompt_tokens": 0, "no_flush_cached_tokens": 0, "no_flush_ttft_seconds": 0, + "post_flush_prompt_tokens": 0, "post_flush_cached_tokens": 0, "post_flush_ttft_seconds": 0, } @@ -206,8 +220,9 @@ def main() -> None: client = OpenAI(base_url=args.url, api_key="dummy-key-for-local-server") if args.compute_kv_cache_token_size: + log(f"Computing KV cache token size...") kv_cache_token_size = compute_kv_cache_token_size(args, client); - print(f"KV cache token size: {kv_cache_token_size}") + log(f"KV cache token size: {kv_cache_token_size}") with Path(args.out).open("a", encoding="utf-8") as f: f.write(f"KV cache token size: {kv_cache_token_size}\n") return @@ -220,30 +235,36 @@ def main() -> None: chat = Chat(args.isl, args.model, args.max_ctx_tokens) + # Flush cache + log(f"Flushing KV-cache with {args.num_filler_prompts} prompts …") + flush_kv_cache(args, client) + # ---------------- RUN 1 ---------------- - print("\n=== Run 1: baseline TTFT ===") + log("=== Run 1: baseline TTFT ===") baseline = chat.stream(client, args.osl) - print(f"Run 1: TTFT = {baseline.ttft_seconds:.3f}s with {baseline.cached_tokens} cached tokens") + log(f"Run 1: TTFT = {baseline.ttft_seconds:.3f}s 
with {baseline.cached_tokens} cached tokens") # Run 2 with same doc without cache flush - print("\n=== Run 2: TTFT without cache flush ===") + log("=== Run 2: TTFT without cache flush ===") no_flush = chat.stream(client, args.osl) - print(f"Run 2: TTFT = {no_flush.ttft_seconds:.3f}s with {no_flush.cached_tokens} cached tokens") + log(f"Run 2: TTFT = {no_flush.ttft_seconds:.3f}s with {no_flush.cached_tokens} cached tokens") # Flush cache - print(f"\nFlushing KV-cache with {NUM_FILLER_PROMPTS} prompts …") + log(f"Flushing KV-cache with {args.num_filler_prompts} prompts …") flush_kv_cache(args, client) # Run 3 with same doc with cache flush - print("\n=== Run 3: TTFT with cache flush ===") + log("=== Run 3: TTFT with cache flush ===") post_flush = chat.stream(client, args.osl) - print(f"Run 3: TTFT = {post_flush.ttft_seconds:.3f}s with {post_flush.cached_tokens} cached tokens") + log(f"Run 3: TTFT = {post_flush.ttft_seconds:.3f}s with {post_flush.cached_tokens} cached tokens") - result["context_tokens"] = baseline.prompt_tokens + result["baseline_prompt_tokens"] = baseline.prompt_tokens result["baseline_cached_tokens"] = baseline.cached_tokens result["baseline_ttft_seconds"] = baseline.ttft_seconds + result["no_flush_prompt_tokens"] = no_flush.prompt_tokens result["no_flush_cached_tokens"] = no_flush.cached_tokens result["no_flush_ttft_seconds"] = no_flush.ttft_seconds + result["post_flush_prompt_tokens"] = post_flush.prompt_tokens result["post_flush_cached_tokens"] = post_flush.cached_tokens result["post_flush_ttft_seconds"] = post_flush.ttft_seconds diff --git a/src/cloudai/workloads/ai_dynamo/report_generation_strategy.py b/src/cloudai/workloads/ai_dynamo/report_generation_strategy.py index b28995c2a..6c5cafe0c 100644 --- a/src/cloudai/workloads/ai_dynamo/report_generation_strategy.py +++ b/src/cloudai/workloads/ai_dynamo/report_generation_strategy.py @@ -27,10 +27,6 @@ if TYPE_CHECKING: pass -CSV_FILES_PATTERN = "profile*_genai_perf.csv" -JSON_FILES_PATTERN = "profile*_genai_perf.json" - - class AIDynamoReportGenerationStrategy(ReportGenerationStrategy): """Strategy for generating reports from AI Dynamo run directories.""" diff --git a/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py b/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py index 699b84221..f3db954b4 100644 --- a/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py +++ b/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py @@ -29,7 +29,12 @@ class AIDynamoSlurmCommandGenStrategy(SlurmCommandGenStrategy): def _container_mounts(self) -> list[str]: td = cast(AIDynamoTestDefinition, self.test_run.test) - return [f"{self.system.hf_home_path.absolute()}:{td.cmd_args.dynamo.workspace_path}/hf_home"] + result = [f"{self.system.hf_home_path.absolute()}:{td.cmd_args.dynamo.workspace_path}/hf_home"] + + if td.cmd_args.storage_cache_dir: + result.append(f"{td.cmd_args.storage_cache_dir.absolute()}:{td.cmd_args.storage_cache_dir.absolute()}") + + return result def image_path(self) -> str | None: tdef: AIDynamoTestDefinition = cast(AIDynamoTestDefinition, self.test_run.test) From d5e24133e63266f7857302f01f2a46996ac10655 Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Mon, 9 Feb 2026 20:41:50 +0000 Subject: [PATCH 06/22] KVCache flusher and hf_home fix. 
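
- Add a KVCacheFlusher helper to openai_chat_client.py
- Size the KV cache flush from the decode worker log when possible, falling
  back to query-based probing (compute_kv_cache_token_size_from_log /
  compute_kv_cache_token_size_from_query)
- Propagate the connector setting to the prefill and decode workers
- Replace --huggingface-home with --hf-home and add --storage-cache-dir

A rough usage sketch of the new flusher (endpoint and values are illustrative;
the real invocation happens through openai_chat_client.py's CLI, driven by
custom_workload.sh):

    import argparse

    from cloudai.workloads.ai_dynamo.openai_chat_client import KVCacheFlusher
    from openai import OpenAI

    args = argparse.Namespace(model="Qwen/Qwen3-0.6B", num_filler_prompts=4,
                              num_filler_tokens=32000)
    client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy-key-for-local-server")
    # Streams short filler chats so earlier prefixes get evicted from the KV cache.
    KVCacheFlusher(args, client).flush()
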
--- src/cloudai/workloads/ai_dynamo/ai_dynamo.py | 18 +- src/cloudai/workloads/ai_dynamo/ai_dynamo.sh | 22 +- .../workloads/ai_dynamo/custom_workload.sh | 200 +++++++++++++++--- .../workloads/ai_dynamo/openai_chat_client.py | 75 ++++--- .../ai_dynamo/slurm_command_gen_strategy.py | 14 +- 5 files changed, 253 insertions(+), 76 deletions(-) diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py index d5a18d804..42d48dd42 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py @@ -261,7 +261,8 @@ class AIDynamoCmdArgs(CmdArgs): """Arguments for AI Dynamo.""" docker_image_url: str - storage_cache_dir: Optional[Path] = Field(default=None, serialization_alias="storage-cache-dir") + hf_home_path: Optional[str] = Field(default=None, serialization_alias="hf_home_path") + storage_cache_dir: Optional[str] = Field(default=None, serialization_alias="storage_cache_dir") num_nodes: int = 1 gpus_per_node: int = 8 dynamo: AIDynamoArgs @@ -329,6 +330,7 @@ def docker_image(self) -> DockerImage: @property def hf_model(self) -> HFModel: if not self._hf_model: + logging.info(f"Creating HFModel for: {self.cmd_args.dynamo.model}") self._hf_model = HFModel(model_name=self.cmd_args.dynamo.model) return self._hf_model @@ -338,7 +340,7 @@ def installables(self) -> list[Installable]: result = [ self.docker_image, self.script, - self.hf_model, + #self.hf_model, self.genai_perf_script, self.calc_percentile_csv, self.cmd_args.lmbench.script, @@ -353,13 +355,17 @@ def was_run_successful(self, tr: TestRun) -> JobStatusResult: output_path = tr.output_path result = True workload_map = self.get_workload_map() - for workload in workload_map.keys(): + for workload in tr.test.workloads.split(","): + if workload not in workload_map: + logging.info(f"Workload {workload} not found in workload map") + result = False + continue workload_csv_file = output_path / workload_map[workload].report_name - if not workload_csv_file.exists() or workload_csv_file.stat().st_size == 0: - logging.info(f"No result file found for workload: {workload}") + if not workload_csv_file.exists(): + logging.info(f"Result file ({workload_csv_file.absolute()}) not found for workload: {workload}") result = False else: - logging.info(f"Result file ({workload_csv_file.absolute()}) exists and is not empty for workload: {workload}") + logging.info(f"Result file ({workload_csv_file.absolute()}) exists for {workload}") return JobStatusResult(result) diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh index e36b105da..f157b9950 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh @@ -3,6 +3,7 @@ # CloudAI params RESULTS_DIR="/cloudai_run_results" INSTALL_DIR="/cloudai_install" +STORAGE_CACHE_DIR="/cloudai_install/storage_cache" HUGGINGFACE_HOME="/root/.cache/huggingface" DONE_MARKER="dynamo_frontend_done.marker" FATAL_ERROR_MARKER="dynamo_fatal_error.marker" @@ -177,8 +178,10 @@ _parse_cli_pairs() { custom_workload_args["--${key#--custom_workload-args-}"]="$2" ;; --custom_workload-*) custom_workload_config["--${key#--custom_workload-}"]="$2" ;; - --huggingface-home) + --hf-home) HUGGINGFACE_HOME="$2" ;; + --storage-cache-dir) + STORAGE_CACHE_DIR="$2" ;; --results-dir) RESULTS_DIR="$2" ;; --install-dir) @@ -558,6 +561,7 @@ _is_custom_workload_workload() { _init_runtime_env() { if _is_vllm; then export HF_HOME="${HUGGINGFACE_HOME}" + hf cache scan fi export 
NATS_SERVER="nats://${dynamo_args["frontend-node"]}:${dynamo_args["nats-port"]}" export ETCD_ENDPOINTS="http://${dynamo_args["frontend-node"]}:${dynamo_args["etcd-port"]}" @@ -703,7 +707,6 @@ function wait_for_frontend_marker() function mark_done() { - sleep 10000 touch "$DONE_MARKER" } @@ -750,6 +753,14 @@ function launch_decode() local base_kv_event_port=${DYN_VLLM_KV_EVENT_PORT:-20080} log "Launching $workers_per_node decode worker(s) with unique port ranges" + local connector="${dynamo_args["connector"]}" + if [[ "$connector" != "none" ]]; then + log "Setting connector for decode workers to $connector" + decode_args["--connector"]="$connector" + else + log "Setting connector for decode workers to $connector" + fi + for i in $(seq 0 $(( $workers_per_node - 1 ))); do local gpu_list=$(_gpu_list_for_worker "${decode_config["gpus-per-worker"]}" "$i") local log_file=$(_log_file_for_worker "decode" "$i") @@ -788,6 +799,11 @@ function launch_prefill() local base_kv_event_port=${DYN_VLLM_KV_EVENT_PORT:-20080} log "Launching $workers_per_node prefill worker(s) with unique port ranges" + local connector="${dynamo_args["connector"]}" + if [[ "$connector" != "none" ]]; then + prefill_args["--connector"]="$connector" + fi + for i in $(seq 0 $(( $workers_per_node - 1 ))); do local gpu_list=$(_gpu_list_for_worker "${prefill_config["gpus-per-worker"]}" "$i") local log_file=$(_log_file_for_worker "prefill" "$i") @@ -893,7 +909,7 @@ EOF function setup_storage_cache_dir() { # Use a global variable that can be exported - STORAGE_CACHE_DIR="${dynamo_args["storage-cache-dir"]}/${TEST_USER}/${dynamo_args["frontend-node"]}/${dynamo_args["connector"]}/cache" + STORAGE_CACHE_DIR="$STORAGE_CACHE_DIR/${TEST_USER}/${dynamo_args["frontend-node"]}/${dynamo_args["connector"]}/cache" rm -rf ${STORAGE_CACHE_DIR} mkdir -p ${STORAGE_CACHE_DIR} chmod 755 ${STORAGE_CACHE_DIR} diff --git a/src/cloudai/workloads/ai_dynamo/custom_workload.sh b/src/cloudai/workloads/ai_dynamo/custom_workload.sh index 22eb4f4ea..e22875566 100644 --- a/src/cloudai/workloads/ai_dynamo/custom_workload.sh +++ b/src/cloudai/workloads/ai_dynamo/custom_workload.sh @@ -14,6 +14,13 @@ connector="" kvbm_metrics_port="" all_isl="" declare -A workload_args +kv_cache_token_size=0 +num_filler_tokens=32000 +g1_token_size=0 +g2_token_size=0 +g3_token_size=0 +bytes_per_token=0 +dyn_system_port="${DYN_SYSTEM_PORT:-8081}" # Simple log function @@ -65,6 +72,10 @@ function process_args() kvbm_metrics_port="$2" shift 2 ;; + --dyn_system_port) + dyn_system_port="$2" + shift 2 + ;; --result_dir) result_dir="$2" shift 2 @@ -81,6 +92,14 @@ function process_args() all_isl="$2" shift 2 ;; + --kv_cache_token_size) + kv_cache_token_size="$2" + shift 2 + ;; + --num_filler_tokens) + num_filler_tokens="$2" + shift 2 + ;; --) shift parse_custom_workload_args "$@" @@ -105,6 +124,8 @@ function process_args() result_dir: $result_dir install_dir: $install_dir report_name: $report_name + kv_cache_token_size: $kv_cache_token_size + num_filler_tokens: $num_filler_tokens isl: $all_isl workload_args: $(for key in "${!workload_args[@]}"; do echo -n "$key: ${workload_args[$key]} "; done) """ @@ -124,38 +145,39 @@ function process_args() # log "LMCache cleared. 
Response: $response" #} -function clear_kv_cache() +function print_metrics() { - # All ports are passed via command line arguments from ai_dynamo.sh - # - port: Dynamo frontend API port (for /clear_kv_blocks endpoint) - # - kvbm_metrics_port: KVBM metrics port (for cache hit rate metrics) + local frontend_metrics_endpoint="${url}:${port}/metrics" + local component_metrics_endpoint="${url}:${dyn_system_port}/metrics" + local kvbm_metrics_endpoint="${url}:${kvbm_metrics_port}/metrics" + + #status=$(curl -s ${frontend_metrics_endpoint} 2>/dev/null | grep -E "cache.*model=") # | grep -E "kvstats_active_blocks|kvstats_total_blocks" || echo "metrics unavailable") + #log "Frontend metrics: $status" + #status=$(curl -s ${component_metrics_endpoint} 2>/dev/null | grep -E "cache.*model=") # | grep -E "kvstats_active_blocks|kvstats_total_blocks" || echo "metrics unavailable") + #log "Component metrics: $status" + + status=$(curl -s ${kvbm_metrics_endpoint} 2>/dev/null ) # | grep -E "host_cache_hit_rate|disk_cache_hit_rate" || echo "kvbm metrics unavailable") + log "KVBM metrics: $status" +} + +function clear_kv_cache() +{ if [[ -z "$port" ]]; then log "ERROR: API port not specified, skipping KV cache clear" return 1 fi - local api_endpoint="${url}:${port}" - - # Check KV cache status before clearing (metrics on main API port) - status=$(curl -s ${api_endpoint}/metrics 2>/dev/null | grep -E "kvstats_active_blocks|kvstats_total_blocks" || echo "metrics unavailable") - log "KV cache status before clear: $status" + log "Metrics before clear:" + print_metrics # Clear KV blocks via the dynamo HTTP endpoint # This internally calls reset_prefix_cache() on all workers - response=$(curl -s -X POST ${api_endpoint}/clear_kv_blocks 2>/dev/null || echo "endpoint unavailable") + response=$(curl -s -X POST ${url}:${dyn_system_port}/engine/clear_kv_blocks 2>/dev/null || echo "endpoint unavailable") log "KV blocks cleared. Response: $response" - # Check KV cache status after clearing - status=$(curl -s ${api_endpoint}/metrics 2>/dev/null | grep -E "kvstats_active_blocks|kvstats_total_blocks" || echo "metrics unavailable") - log "KV cache status after clear: $status" - - # Check KVBM-specific metrics if connector is kvbm and port is specified - if [[ "$connector" == "kvbm" && -n "$kvbm_metrics_port" ]]; then - local kvbm_metrics_endpoint="${url}:${kvbm_metrics_port}/metrics" - status=$(curl -s ${kvbm_metrics_endpoint} 2>/dev/null | grep -E "host_cache_hit_rate|disk_cache_hit_rate" || echo "kvbm metrics unavailable") - log "KVBM cache hit rates after clear: $status" - fi + log "Metrics after clear:" + print_metrics # TODO: Add LMCache clearing when connector is lmcache # if [[ "$connector" == "lmcache" ]]; then @@ -163,16 +185,106 @@ function clear_kv_cache() # fi } -function main() +function compute_kv_cache_token_size_from_log() { - process_args "$@" + # Parse the decode worker log to extract G1 (GPU) KV cache information + # Log format examples: + # INFO gpu_worker.determine_available_memory: Available KV cache memory: 54.90 GiB (per GPU!) + # INFO kv_cache_utils._report_kv_cache_config: GPU KV cache size: 3,198,272 tokens (total for worker) + # + # IMPORTANT: "Available KV cache memory" is PER GPU, while "GPU KV cache size" is for the entire + # worker (total tokens across all GPUs). We need to multiply memory by tensor_parallel_size. 
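+    #
+    # Worked example using the sample log lines above (tensor_parallel_size of 8
+    # is assumed purely for illustration): 54.90 GiB/GPU * 8 GPUs = 439.2 GiB
+    # total, and 439.2 * 1024^3 / 3,198,272 tokens ~= 147,450 bytes (~144 KiB)
+    # per token, which is what bytes_per_token would evaluate to below.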
- report_file=$result_dir/$report_name - kv_cache_token_size_file=$result_dir/kv_cache_token_size.out - num_filler_prompts=0 - num_filler_tokens=32000 + log "Computing KV cache token sizes from worker log files..." + + # Find decode worker log file(s) in result_dir + local decode_log=$(find $result_dir -name "dynamo_decode_0_0.log" 2>/dev/null | head -1) + if [[ -z "$decode_log" ]]; then + log "WARNING: No decode worker log found in $result_dir, falling back to query-based method" + return 1 + fi + + log "Using decode worker log: $decode_log" + + # Extract tensor_parallel_size from log: "tensor_parallel_size=8" + local tp_size=1 + local tp_line=$(grep -o "tensor_parallel_size=[0-9]*" "$decode_log" | head -1) + if [[ -n "$tp_line" ]]; then + tp_size=$(echo "$tp_line" | cut -d'=' -f2) + log "Tensor parallel size from log: $tp_size GPUs" + else + log "WARNING: Could not find tensor_parallel_size in log, assuming 1 GPU" + fi + + # Extract G1 token count: "GPU KV cache size: 3,198,272 tokens" + # This is the TOTAL token capacity for the entire worker (across all GPUs) + local g1_tokens_line=$(grep "GPU KV cache size:" "$decode_log" | tail -1) + if [[ -z "$g1_tokens_line" ]]; then + log "WARNING: Could not find 'GPU KV cache size' in log, falling back to query-based method" + return 1 + fi + + # Parse: extract number, remove commas + g1_token_size=$(echo "$g1_tokens_line" | sed -E 's/.*GPU KV cache size: ([0-9,]+) tokens.*/\1/' | tr -d ',') + log "G1 (GPU) token size from log: $g1_token_size tokens (total for worker)" - log "Computing KV cache token size" + # Extract G1 memory size per GPU: "Available KV cache memory: 54.90 GiB" + # This is PER GPU, so we need to multiply by tensor_parallel_size + local g1_memory_line=$(grep "Available KV cache memory:" "$decode_log" | tail -1) + if [[ -z "$g1_memory_line" ]]; then + log "WARNING: Could not find 'Available KV cache memory' in log, falling back to query-based method" + return 1 + fi + + # Parse: extract the GiB value (this is per GPU) + local g1_memory_per_gpu_gib=$(echo "$g1_memory_line" | sed -E 's/.*Available KV cache memory: ([0-9.]+) GiB.*/\1/') + log "G1 (GPU) memory per GPU from log: $g1_memory_per_gpu_gib GiB" + + # Calculate total G1 memory across all GPUs + local g1_memory_total_gib=$(awk "BEGIN {printf \"%.2f\", $g1_memory_per_gpu_gib * $tp_size}") + log "G1 (GPU) total memory across $tp_size GPUs: $g1_memory_total_gib GiB" + + # Calculate bytes per token = (total_G1_GiB * 1024^3) / G1_tokens + # Using awk to handle the initial float-to-int conversion from g1_memory_per_gpu_gib + bytes_per_token=$(awk "BEGIN {printf \"%d\", ($g1_memory_total_gib * 1024 * 1024 * 1024) / $g1_token_size}") + log "Calculated bytes per token: $bytes_per_token" + + # Calculate G2 (CPU) token size from DYN_KVBM_CPU_CACHE_GB environment variable + local g2_cache_gb=${DYN_KVBM_CPU_CACHE_GB:-0} + if [[ "$g2_cache_gb" != "0" && -n "$g2_cache_gb" ]]; then + # G2_tokens = (G2_GB * 1024^3) / bytes_per_token + g2_token_size=$(( (g2_cache_gb * 1024 * 1024 * 1024) / bytes_per_token )) + log "G2 (CPU) cache: $g2_cache_gb GB = $g2_token_size tokens" + else + log "G2 (CPU) cache not configured (DYN_KVBM_CPU_CACHE_GB not set)" + fi + + # Calculate G3 (Disk) token size from DYN_KVBM_DISK_CACHE_GB environment variable + local g3_cache_gb=${DYN_KVBM_DISK_CACHE_GB:-0} + if [[ "$g3_cache_gb" != "0" && -n "$g3_cache_gb" ]]; then + # G3_tokens = (G3_GB * 1024^3) / bytes_per_token + g3_token_size=$(( (g3_cache_gb * 1024 * 1024 * 1024) / bytes_per_token )) + log "G3 (Disk) cache: 
$g3_cache_gb GB = $g3_token_size tokens" + else + log "G3 (Disk) cache not configured (DYN_KVBM_DISK_CACHE_GB not set)" + fi + + kv_cache_token_size=$(( g1_token_size + g2_token_size )) + + log "KV cache summary:" + log " G1 (GPU): $g1_token_size tokens (${g1_memory_per_gpu_gib} GiB/GPU x $tp_size GPUs = ${g1_memory_total_gib} GiB total)" + log " G2 (CPU): $g2_token_size tokens (${g2_cache_gb} GB)" + log " G3 (Disk): $g3_token_size tokens (${g3_cache_gb} GB)" + log " Total: $kv_cache_token_size tokens" + log " Bytes per token: $bytes_per_token" + return 0 +} + +function compute_kv_cache_token_size_from_query() +{ + # Fallback: compute by sending queries (original method) + local kv_cache_token_size_file=$result_dir/kv_cache_token_size.out + log "Computing KV cache token size via queries..." python3 ${install_dir}/openai_chat_client.py \ --model $model \ --url $url:$port/v1 \ @@ -184,8 +296,34 @@ function main() --min_filler_prompts 10 kv_cache_token_size=$(grep cache $kv_cache_token_size_file | cut -d':' -f 2 | tr -d ' ') - num_filler_prompts=$((kv_cache_token_size / num_filler_tokens)) - log "KV cache token size: $kv_cache_token_size, num filler prompts: $num_filler_prompts, num filler tokens: $num_filler_tokens" + log "KV cache token size from queries: $kv_cache_token_size" +} + +function compute_kv_cache_token_size() +{ + if [[ $kv_cache_token_size -gt 0 ]]; then + log "KV cache token size already provided: $kv_cache_token_size" + return + fi + + # Try to get from log files first (faster, no queries needed) + if compute_kv_cache_token_size_from_log; then + log "Successfully computed KV cache token size from log files" + else + # Fallback to query-based method + log "Falling back to query-based KV cache token size computation" + compute_kv_cache_token_size_from_query + fi +} + +function main() +{ + process_args "$@" + + report_file=$result_dir/$report_name + + compute_kv_cache_token_size + local num_filler_prompts=$(( 1 + (kv_cache_token_size / num_filler_tokens) )) log "Dumping CSV header" python3 ${install_dir}/openai_chat_client.py --dump_csv_header --out $report_file @@ -197,11 +335,13 @@ function main() --model $model \ --url $url:$port/v1 \ --isl $isl \ - --osl 10 \ + --osl 1 \ --out $report_file \ --num_filler_prompts $num_filler_prompts \ --num_filler_tokens $num_filler_tokens + log "Sleeping for 5 seconds before clearing KV cache" + sleep 5 clear_kv_cache done diff --git a/src/cloudai/workloads/ai_dynamo/openai_chat_client.py b/src/cloudai/workloads/ai_dynamo/openai_chat_client.py index 6d45c1670..640722782 100644 --- a/src/cloudai/workloads/ai_dynamo/openai_chat_client.py +++ b/src/cloudai/workloads/ai_dynamo/openai_chat_client.py @@ -60,11 +60,10 @@ def __init__(self, ttft_seconds: float, prompt_tokens: int, cached_tokens: int): class Chat: """Represents a chat context with a document for TTFT benchmarking.""" - def __init__(self, isl: int, model_id: str, max_ctx_tokens: int): + def __init__(self, model: str, isl: int): self.isl = isl - self.model_id = model_id - self.max_ctx_tokens = max_ctx_tokens - self.tok = AutoTokenizer.from_pretrained(model_id, use_fast=True) + self.model = model + self.tok = AutoTokenizer.from_pretrained(self.model, use_fast=True) raw_doc = "".join(random.choices(string.ascii_letters + string.digits, k=self.isl * 4)) @@ -85,7 +84,7 @@ def stream(self, client: OpenAI, max_tokens: int) -> TtftStats: start = time.perf_counter() try: stream = client.chat.completions.create( - model=self.model_id, + model=self.model, messages=self.messages, temperature=0.0, 
stream=True, @@ -122,10 +121,17 @@ def stream(self, client: OpenAI, max_tokens: int) -> TtftStats: return stats -def flush_kv_cache(args: argparse.Namespace, client: OpenAI) -> None: - """Flush KV cache by sending filler prompts.""" - for _ in range(args.num_filler_prompts): - _ = Chat(args.num_filler_tokens, args.model, args.max_ctx_tokens).stream(client, 1) +class KVCacheFlusher: + def __init__(self, args: argparse.Namespace, client: OpenAI): + self.client = client + self.args = args + self.filler_chats = [Chat(args.model, args.num_filler_tokens) for _ in range(args.num_filler_prompts)] + + def flush(self) -> None: + log(f"Stream {self.args.num_filler_prompts} filler chats with {self.args.num_filler_tokens} tokens each...") + for n, chat in enumerate(self.filler_chats): + stats = chat.stream(self.client, 1) + #log(f"Filler chat {n + 1} of {len(self.filler_chats)}: {stats.ttft_seconds:.3f}s with {stats.cached_tokens} cached tokens") # ---------------- command-line parsing -------------------------------- @@ -137,12 +143,11 @@ def parse_args() -> argparse.Namespace: ap.add_argument("--dump_csv_header", action="store_true", help="Only dump CSV header and exit.") ap.add_argument("--url", help="URL of the API endpoint.") ap.add_argument("--model", help="Model name/ID.") - ap.add_argument("--max_ctx_tokens", type=int, default=131_072, help="Max context tokens.") ap.add_argument("--isl", type=int, help="Input tokens.") ap.add_argument("--osl", type=int, help="Output tokens.") ap.add_argument("--out", help="JSONL file for results.") - ap.add_argument("--max_filler_prompts", type=int, default=1000, help="Max number of filler prompts (used to compute the KV cache token size) to send for cache flush.") - ap.add_argument("--min_filler_prompts", type=int, default=10, help="Min number of filler prompts (used to compute the KV cache token size) to send for cache flush.") + ap.add_argument("--max_filler_prompts", type=int, default=200, help="Max number of filler prompts (used to compute the KV cache token size) to send for cache flush.") + ap.add_argument("--min_filler_prompts", type=int, default=1, help="Min number of filler prompts (used to compute the KV cache token size) to send for cache flush.") ap.add_argument( "--num_filler_prompts", type=int, @@ -160,8 +165,9 @@ def parse_args() -> argparse.Namespace: def SendFillerQueries(args: argparse.Namespace, client: OpenAI, num: int): - for _ in range(num): - _ = Chat(args.num_filler_tokens, args.model, args.max_ctx_tokens).stream(client, 1) + for n in range(num): + log(f"Sending filler query {n + 1} of {num}...") + _ = Chat(args.model, args.isl).stream(client, 1) def compute_kv_cache_token_size(args: argparse.Namespace, client: OpenAI) -> int: @@ -175,29 +181,28 @@ def compute_kv_cache_token_size(args: argparse.Namespace, client: OpenAI) -> int # cache. # Do a binary search for the number of filler prompts required to flush the KV cache. - # We use multiple of 10 to speed up the search. 
- maxFillerPrompts = args.max_filler_prompts // 10 - minFillerPrompts = min(10, args.min_filler_prompts // 10) + maxFillerPrompts = args.max_filler_prompts + minFillerPrompts = min(1, args.min_filler_prompts) log(f"Doing binary search for the number of filler prompts required to flush the KV cache between {minFillerPrompts} and {maxFillerPrompts}...") log(f"Sending an initialcanary query with 1000 tokens...") - canary_chat = Chat(1000, args.model, args.max_ctx_tokens) + canary_chat = Chat(args.model, args.isl) canary_stats = canary_chat.stream(client, 1) log(f"Initial Canary query: {canary_stats.ttft_seconds:.3f}s with {canary_stats.cached_tokens} cached tokens") while minFillerPrompts < maxFillerPrompts: - numFillerPrompts = (maxFillerPrompts + minFillerPrompts) // 2 * 10 - log(f"Trying {numFillerPrompts} filler prompts...") + numFillerPrompts = (maxFillerPrompts + minFillerPrompts) // 2 + log(f"Trying {numFillerPrompts} filler prompts with {args.num_filler_tokens} tokens each...") SendFillerQueries(args, client, numFillerPrompts) log(f"Sending canary query after {numFillerPrompts} filler prompts...") canary_stats = canary_chat.stream(client, 1) log(f"Canary query: {canary_stats.ttft_seconds:.3f}s with {canary_stats.cached_tokens} cached tokens") if canary_stats.cached_tokens < 500: - maxFillerPrompts = numFillerPrompts // 10 + maxFillerPrompts = numFillerPrompts else: - minFillerPrompts = numFillerPrompts // 10 + 1 - log(f"Min filler prompts: {minFillerPrompts * 10}, Max filler prompts: {maxFillerPrompts * 10}") - return minFillerPrompts * 10 * args.num_filler_tokens + minFillerPrompts = numFillerPrompts + 1 + log(f"Min filler prompts: {minFillerPrompts}, Max filler prompts: {maxFillerPrompts}") + return minFillerPrompts * args.num_filler_tokens # ---------------- main routine ---------------------------------------- @@ -206,13 +211,10 @@ def main() -> None: result = { "isl": args.isl, - "baseline_prompt_tokens": 0, "baseline_cached_tokens": 0, "baseline_ttft_seconds": 0, - "no_flush_prompt_tokens": 0, "no_flush_cached_tokens": 0, "no_flush_ttft_seconds": 0, - "post_flush_prompt_tokens": 0, "post_flush_cached_tokens": 0, "post_flush_ttft_seconds": 0, } @@ -233,11 +235,15 @@ def main() -> None: f.write("\n") return - chat = Chat(args.isl, args.model, args.max_ctx_tokens) + chat = Chat(args.model, args.isl) # Flush cache - log(f"Flushing KV-cache with {args.num_filler_prompts} prompts …") - flush_kv_cache(args, client) + #log(f"Flushing KV-cache with {args.num_filler_prompts} prompts …") + #flusher.flush() + + log("=== Run 1: warmup ===") + warmup = Chat(args.model, args.isl).stream(client, 1) + log(f"Run 1: warmup: TTFT = {warmup.ttft_seconds:.3f}s with {warmup.cached_tokens} cached tokens") # ---------------- RUN 1 ---------------- log("=== Run 1: baseline TTFT ===") @@ -251,20 +257,21 @@ def main() -> None: # Flush cache log(f"Flushing KV-cache with {args.num_filler_prompts} prompts …") - flush_kv_cache(args, client) + KVCacheFlusher(args, client).flush() # Run 3 with same doc with cache flush + log("=== Run 3: warmup ===") + warmup = Chat(args.model, args.isl).stream(client, 1) + log(f"Run 3: warmup: TTFT = {warmup.ttft_seconds:.3f}s with {warmup.cached_tokens} cached tokens") + log("=== Run 3: TTFT with cache flush ===") post_flush = chat.stream(client, args.osl) log(f"Run 3: TTFT = {post_flush.ttft_seconds:.3f}s with {post_flush.cached_tokens} cached tokens") - result["baseline_prompt_tokens"] = baseline.prompt_tokens result["baseline_cached_tokens"] = baseline.cached_tokens 
result["baseline_ttft_seconds"] = baseline.ttft_seconds - result["no_flush_prompt_tokens"] = no_flush.prompt_tokens result["no_flush_cached_tokens"] = no_flush.cached_tokens result["no_flush_ttft_seconds"] = no_flush.ttft_seconds - result["post_flush_prompt_tokens"] = post_flush.prompt_tokens result["post_flush_cached_tokens"] = post_flush.cached_tokens result["post_flush_ttft_seconds"] = post_flush.ttft_seconds diff --git a/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py b/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py index f3db954b4..04630bab7 100644 --- a/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py +++ b/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py @@ -29,10 +29,15 @@ class AIDynamoSlurmCommandGenStrategy(SlurmCommandGenStrategy): def _container_mounts(self) -> list[str]: td = cast(AIDynamoTestDefinition, self.test_run.test) - result = [f"{self.system.hf_home_path.absolute()}:{td.cmd_args.dynamo.workspace_path}/hf_home"] + result = list[str]() + + logging.info(f"hf_home_path: {td.cmd_args.hf_home_path}") + logging.info(f"storage_cache_dir: {td.cmd_args.storage_cache_dir}") + if td.cmd_args.hf_home_path: + result.append(f"{td.cmd_args.hf_home_path}:{td.cmd_args.hf_home_path}") if td.cmd_args.storage_cache_dir: - result.append(f"{td.cmd_args.storage_cache_dir.absolute()}:{td.cmd_args.storage_cache_dir.absolute()}") + result.append(f"{td.cmd_args.storage_cache_dir}:{td.cmd_args.storage_cache_dir}") return result @@ -71,10 +76,13 @@ def _gen_script_args(self, td: AIDynamoTestDefinition) -> List[str]: args = [ "--user $USER", f"--install-dir {self.container_install_path}", - f"--huggingface-home {td.cmd_args.dynamo.workspace_path}/hf_home", f"--results-dir {self.container_results_path}", f"--dynamo-repo {td.dynamo_repo.container_mount}", ] + if td.cmd_args.hf_home_path: + args.append(f"--hf-home {td.cmd_args.hf_home_path}") + if td.cmd_args.storage_cache_dir: + args.append(f"--storage-cache-dir {td.cmd_args.storage_cache_dir}") args.extend( self._get_toml_args( td.cmd_args.dynamo, From 31b1d93cf52a65628918baaadde9861d38ba2073 Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Tue, 10 Feb 2026 04:45:00 +0000 Subject: [PATCH 07/22] Fixed prefill nodelist computation. --- src/cloudai/workloads/ai_dynamo/ai_dynamo.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh index f157b9950..02d4c9907 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh @@ -205,8 +205,9 @@ _populate_nodelist() { local count=0 local nodelist="" - for node in $(echo $DYNAMO_NODELIST | cut -d',' -f1-${num_nodes}); do - if ! echo "${exclude_nodelist}" | grep -q "$node"; then + for node in $(echo "$DYNAMO_NODELIST" | tr ',' ' '); do + if [[ -z "$node" ]]; then continue; fi + if ! echo ",${exclude_nodelist}," | grep -q ",$node,"; then nodelist+="$node," count=$(( count + 1 )) if [[ "$count" -eq "${num_nodes}" ]]; then From 803a383ec1672e787883bd196447e0edb6eca861 Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Tue, 10 Feb 2026 04:59:54 +0000 Subject: [PATCH 08/22] Several shell script fixes. 
--- src/cloudai/workloads/ai_dynamo/ai_dynamo.sh | 46 ++++++++++--------- .../workloads/ai_dynamo/custom_workload.sh | 2 +- src/cloudai/workloads/ai_dynamo/genai_perf.sh | 6 +-- src/cloudai/workloads/ai_dynamo/lmbench.sh | 8 ++-- 4 files changed, 32 insertions(+), 30 deletions(-) diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh index 02d4c9907..0a31c7d66 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh @@ -85,7 +85,7 @@ _csv_index_of() { local IFS=',' arr i read -ra arr <<< "$list" for i in "${!arr[@]}"; do - if [[ "${arr[$i]}" == "$name" || "${arr[$i]}" == *"$name"* || "$name" == *"${arr[$i]}"* ]]; then + if [[ "${arr[$i]}" == "$name" ]]; then echo "$i"; return 0 fi done @@ -114,15 +114,17 @@ _apply_sglang_section_args() { # prefill group local prefill_nodes="${prefill_config["num-nodes"]}" - local prefill_master_host="$(_first_in_csv "${prefill_config["node-list"]}")" - local prefill_master_ip="$(_resolve_host_ip "${prefill_master_host}")" - local prefill_rank="$(_csv_index_of "${prefill_config["node-list"]}" "$self")" - local prefill_total_gpus=$(( gpn * prefill_nodes )) - prefill_args["--dist-init-addr"]="${prefill_master_ip}:${dynamo_args["prefill-port"]}" - prefill_args["--nnodes"]="${prefill_nodes}" - prefill_args["--node-rank"]="$([[ "$prefill_rank" -ge 0 ]] && echo "$prefill_rank" || echo 0)" - prefill_args["--tp-size"]="${prefill_args["--tp-size"]:-${prefill_total_gpus}}" - prefill_args["--dp-size"]="${prefill_args["--dp-size"]:-${prefill_total_gpus}}" + if [[ "$prefill_nodes" -gt 0 ]]; then + local prefill_master_host="$(_first_in_csv "${prefill_config["node-list"]}")" + local prefill_master_ip="$(_resolve_host_ip "${prefill_master_host}")" + local prefill_rank="$(_csv_index_of "${prefill_config["node-list"]}" "$self")" + local prefill_total_gpus=$(( gpn * prefill_nodes )) + prefill_args["--dist-init-addr"]="${prefill_master_ip}:${dynamo_args["prefill-port"]}" + prefill_args["--nnodes"]="${prefill_nodes}" + prefill_args["--node-rank"]="$([[ "$prefill_rank" -ge 0 ]] && echo "$prefill_rank" || echo 0)" + prefill_args["--tp-size"]="${prefill_args["--tp-size"]:-${prefill_total_gpus}}" + prefill_args["--dp-size"]="${prefill_args["--dp-size"]:-${prefill_total_gpus}}" + fi # decode group local decode_nodes="${decode_config["num-nodes"]}" @@ -296,7 +298,7 @@ _apply_connector_settings() { _patch_dynamo_args() { if [[ -z "${dynamo_args["frontend-node"]}" ]]; then - dynamo_args["frontend-node"]=$(echo ${decode_config["node-list"]} | cut -d',' -f1) + dynamo_args["frontend-node"]=$(echo "${decode_config["node-list"]}" | cut -d',' -f1) fi dynamo_args["url"]="http://${dynamo_args["frontend-node"]}:${dynamo_args["port"]}" @@ -534,17 +536,17 @@ _current_node_name() { _is_frontend_node() { local name="$(_current_node_name)" - [[ "${dynamo_args["frontend-node"]}" == *"$name"* ]] + [[ ",${dynamo_args["frontend-node"]}," == *",$name,"* ]] } _is_decode_node() { local name="$(_current_node_name)" - [[ "${decode_config["node-list"]}" == *"$name"* ]] + [[ ",${decode_config["node-list"]}," == *",$name,"* ]] } _is_prefill_node() { local name="$(_current_node_name)" - [[ "${prefill_config["node-list"]}" == *"$name"* ]] + [[ ",${prefill_config["node-list"]}," == *",$name,"* ]] } _is_genai_perf_workload() { @@ -577,11 +579,11 @@ function launch_node_setup_cmd() logfile="${RESULTS_DIR}/node_setup_$(_current_node_name).log" if [[ -n "${dynamo_args["node-setup-cmd"]}" ]]; then log "Launching node 
setup command: ${dynamo_args["node-setup-cmd"]}" - bash -c "${dynamo_args["node-setup-cmd"]}" 2>&1 >> "$logfile" + bash -c "${dynamo_args["node-setup-cmd"]}" >> "$logfile" 2>&1 log "Node setup complete" fi - log "Node environment:\n$(env)" 2>&1 >> "$logfile" + log "Node environment:\n$(env)" >> "$logfile" 2>&1 } _require_cmd() { @@ -783,7 +785,7 @@ function launch_decode() function wait_for_etcd() { - while [ "`curl -ks ${ETCD_ENDPOINTS}/readyz`" != "ok" ]; do + while [ "$(curl -ks ${ETCD_ENDPOINTS}/readyz)" != "ok" ]; do log "Waiting for etcd to be ready by polling ${ETCD_ENDPOINTS}/readyz"; sleep 10; done @@ -836,11 +838,11 @@ function launch_lmcache_controller() function wait_for_dynamo_frontend() { - local want_prefill=0 #$(_expected_ready_prefill) + local want_prefill=$(_expected_ready_prefill) local want_decode=$(_expected_ready_decode) while :; do - local have_prefill=0 #$(_count_initialized_prefill) + local have_prefill=$(_count_initialized_prefill) local have_decode=$(_count_initialized_decode) log "Initialized: prefill ${have_prefill}/${want_prefill}; decode ${have_decode}/${want_decode}" @@ -911,9 +913,9 @@ function setup_storage_cache_dir() { # Use a global variable that can be exported STORAGE_CACHE_DIR="$STORAGE_CACHE_DIR/${TEST_USER}/${dynamo_args["frontend-node"]}/${dynamo_args["connector"]}/cache" - rm -rf ${STORAGE_CACHE_DIR} - mkdir -p ${STORAGE_CACHE_DIR} - chmod 755 ${STORAGE_CACHE_DIR} + rm -rf "${STORAGE_CACHE_DIR}" + mkdir -p "${STORAGE_CACHE_DIR}" + chmod 755 "${STORAGE_CACHE_DIR}" } function setup_kvbm() diff --git a/src/cloudai/workloads/ai_dynamo/custom_workload.sh b/src/cloudai/workloads/ai_dynamo/custom_workload.sh index e22875566..6a3d43d9a 100644 --- a/src/cloudai/workloads/ai_dynamo/custom_workload.sh +++ b/src/cloudai/workloads/ai_dynamo/custom_workload.sh @@ -198,7 +198,7 @@ function compute_kv_cache_token_size_from_log() log "Computing KV cache token sizes from worker log files..." 
# Find decode worker log file(s) in result_dir - local decode_log=$(find $result_dir -name "dynamo_decode_0_0.log" 2>/dev/null | head -1) + local decode_log=$(find "$result_dir" -name "dynamo_decode_0_0.log" 2>/dev/null | head -1) if [[ -z "$decode_log" ]]; then log "WARNING: No decode worker log found in $result_dir, falling back to query-based method" return 1 diff --git a/src/cloudai/workloads/ai_dynamo/genai_perf.sh b/src/cloudai/workloads/ai_dynamo/genai_perf.sh index 26d9cbcf0..8a1d29483 100644 --- a/src/cloudai/workloads/ai_dynamo/genai_perf.sh +++ b/src/cloudai/workloads/ai_dynamo/genai_perf.sh @@ -84,7 +84,7 @@ full_cmd="$cmd $cmdline_args" # launch genai-perf log "Launching genai-perf with args: $full_cmd" -eval $full_cmd +eval "$full_cmd" log "Done with genai-perf run" @@ -92,9 +92,9 @@ log "Done with genai-perf run" num_nodes=${SLURM_JOB_NUM_NODES:-1} total_gpus=$(( $gpus_per_node * $num_nodes )) -profile_path=$(find $result_dir -type f -name "profile_genai_perf.csv" -print -quit) -sed -ie 's/\r//g' $profile_path +profile_path=$(find "$result_dir" -type f -name "profile_genai_perf.csv" -print -quit) if [[ -f "$profile_path" ]]; then + sed -i 's/\r//g' "$profile_path" output_tokens_per_second=$(grep "Output Token Throughput (tokens/sec)" $profile_path | cut -d ',' -f 2) output_tokens_per_second_per_gpu=$(awk "BEGIN {printf \"%.2f\", $output_tokens_per_second / $total_gpus}") request_throughput=$(grep "Request Throughput (per sec)" $profile_path | cut -d ',' -f 2) diff --git a/src/cloudai/workloads/ai_dynamo/lmbench.sh b/src/cloudai/workloads/ai_dynamo/lmbench.sh index 75eaffc8a..e37249b8b 100644 --- a/src/cloudai/workloads/ai_dynamo/lmbench.sh +++ b/src/cloudai/workloads/ai_dynamo/lmbench.sh @@ -84,9 +84,9 @@ while [[ $# -gt 0 ]]; do esac done -pushd $lmbench_dir +pushd "$lmbench_dir" || { log "ERROR: Cannot cd to $lmbench_dir"; exit 1; } -cmdline_args="${cmdline_args} --output ${result_dir}/lmbench_bench_output.csv ${key} ${extra_args}" +cmdline_args="${cmdline_args} ${extra_args}" # launch lmbench @@ -105,13 +105,13 @@ cmdline_args="${cmdline_args} --output ${result_dir}/lmbench_bench_output.csv ${ log "Launching lmbench with args: $cmd $cmdline_args" artifacts_dir="${result_dir}/lmbench_artifacts" -mkdir -p $artifacts_dir +mkdir -p "$artifacts_dir" for qps in ${all_qps//,/ }; do log "Launching lmbench with args: $cmd $cmdline_args --qps $qps --output $output_file" output_file="${artifacts_dir}/lmbench_bench_output_${qps}.csv" report_file="${result_dir}/${report_name//.csv/_${qps}.csv}" - eval "$cmd $cmdline_args --qps $qps --output $output_file" 2>&1 > ${artifacts_dir}/lmbench_bench_output_${qps}.log + eval "$cmd $cmdline_args --qps $qps --output $output_file" > "${artifacts_dir}/lmbench_bench_output_${qps}.log" 2>&1 python3 ${install_dir}/${calc_percentile_csv_script} --input $output_file --output ${report_file} done From 1851cdd59ad1917c1cbb9f55e91318f930dc1d5c Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Tue, 10 Feb 2026 05:37:17 +0000 Subject: [PATCH 09/22] Updated ai-dynamo toml files. 
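
The reworked vllm.toml keeps per-worker engine flags under dedicated
[cmd_args.dynamo.<worker>.args] tables and uses %MODEL%, %URL%, %ENDPOINT%,
%RESULTS_DIR% and %CACHEDIR% placeholders in the workload sections. Those
placeholders are presumably substituted by ai_dynamo.sh before a workload is
launched (launch_workload already passes expanded_config/expanded_arguments);
the helper below is only a sketch of that idea using bash parameter expansion,
and the function name is hypothetical, not the actual implementation:

    # Hypothetical helper: substitute run-time values into a config value.
    _expand_placeholders() {
        local value="$1"
        value="${value//%MODEL%/${dynamo_args["model"]}}"
        value="${value//%URL%/${dynamo_args["url"]}}"
        value="${value//%ENDPOINT%/${dynamo_args["endpoint"]}}"
        value="${value//%RESULTS_DIR%/${RESULTS_DIR}}"
        value="${value//%CACHEDIR%/${STORAGE_CACHE_DIR}}"
        echo "$value"
    }
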
--- conf/experimental/ai_dynamo/test/vllm.toml | 99 ++++++++++++++++--- .../test_scenario/vllm_kvbm_slurm.toml | 72 ++++++++++++++ .../ai_dynamo/test_scenario/vllm_slurm.toml | 8 +- 3 files changed, 165 insertions(+), 14 deletions(-) create mode 100644 conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml diff --git a/conf/experimental/ai_dynamo/test/vllm.toml b/conf/experimental/ai_dynamo/test/vllm.toml index 6708d54be..7d5e87f51 100644 --- a/conf/experimental/ai_dynamo/test/vllm.toml +++ b/conf/experimental/ai_dynamo/test/vllm.toml @@ -19,29 +19,84 @@ description = "vLLM backend with Qwen3-0.6B model" test_template_name = "AIDynamo" [cmd_args] -docker_image_url = "nvcr.io/nvidia/ai-dynamo/vllm-runtime:0.7.0" +docker_image_url = "nvcr.io/nvidia/ai-dynamo/vllm-runtime:0.8.1" +num_nodes = 2 [cmd_args.dynamo] backend = "vllm" model = "Qwen/Qwen3-0.6B" - workspace-path = "/workspace/examples/backends/vllm" - prefill-cmd = 'python3 -m dynamo.vllm --is-prefill-worker' - decode-cmd = 'python3 -m dynamo.vllm' + workspace-path = "/workspace" + node-setup-cmd = "/usr/local/ucx/bin/ucx_info -d |grep Transport | sort -u;" + ingress-cmd = "python -m dynamo.frontend --router-mode kv" + port = 8787 + endpoint = "v1/chat/completions" + etcd-cmd = "etcd --log-level info --data-dir /tmp/etcd " + nats-cmd = "nats-server -js" + etcd-port = 2379 + nats-port = 4222 + workloads = "genai_perf.sh,lmbench.sh,custom_workload.sh" + worker-error-pattern = "zmq.error.ZMQError:.Address.already.in.use|ERROR.core.run_engine_core:.EngineCore.failed.to.start|ERROR.multiproc_executor.worker_busy_loop:.WorkerProc.hit.an.exception|ValueError:.a.python.*async.generator:.EngineDeadError:.EngineCore.encountered.an.issue|ZeroDivisionError:.integer.division.or.modulo.by.zero|ERROR.core.run_engine_core:.EngineCore.encountered.a.fatal.error|Exception:.Failed.to.fetch.model|ERROR.*Engine.core.proc.EngineCore_.*died.unexpectedly|RuntimeError:.Engine.core.initialization.failed." 
+ + [cmd_args.dynamo.prefill_worker] + num-nodes = 1 + #node-list = "" + cmd = 'python3 -m dynamo.vllm --is-prefill-worker --enforce-eager' + worker-initialized-regex = 'VllmWorker.*has.been.initialized' + multiple-workers-per-node = "false" + extra-args = "--no-enable-expert-parallel" + + [cmd_args.dynamo.prefill_worker.args] + model = "%MODEL%" + gpu-memory-utilization = 0.8 + tensor-parallel-size = 8 + pipeline-parallel-size = 1 + data-parallel-size = 1 [cmd_args.dynamo.decode_worker] num-nodes = 1 + #node-list = "" + cmd = 'python3 -m dynamo.vllm --enforce-eager' + worker-initialized-regex = 'VllmWorker.*has.been.initialized' + multiple-workers-per-node = "false" + extra-args = "--no-enable-expert-parallel" [cmd_args.dynamo.decode_worker.args] - model = "Qwen/Qwen3-0.6B" - gpu-memory-utilization = 0.95 + model = "%MODEL%" + gpu-memory-utilization = 0.8 tensor-parallel-size = 8 pipeline-parallel-size = 1 data-parallel-size = 1 + [cmd_args.lmcache] + controller_cmd = "lmcache_controller --host localhost --port 9000 --monitor-port 9001" + + [cmd_args.lmcache.args] + chunk_size = 256 + local_cpu = false + nixl_buffer_size = 10737418240 + nixl_buffer_device = "cuda" + extra_config_enable_nixl_storage = true + extra_config_nixl_backend = "GDS_MT" + extra_config_nixl_file_pool_size = 64 + extra_config_nixl_path = "%CACHEDIR%" + + enable_controller = true + lmcache_instance_id = "lmcache_default_instance" + controller_url = "localhost:9001" + lmcache_worker_port = 8788 + distributed_url = "localhost:8789" + [cmd_args.genai_perf] - model = "Qwen/Qwen3-0.6B" - endpoint = "v1/chat/completions" + cmd = "genai-perf profile" + extra-args = "--streaming --verbose -- -v --async" + + [cmd_args.genai_perf.args] + model = "%MODEL%" + url = "%URL%" + endpoint = "%ENDPOINT%" endpoint-type = "chat" + artifact-dir = "%RESULTS_DIR%/genai_perf_artifacts" + profile-export-file = "profile.json" extra-inputs = 'min_tokens:10' output-tokens-mean = 500 output-tokens-stddev = 0 @@ -51,13 +106,33 @@ docker_image_url = "nvcr.io/nvidia/ai-dynamo/vllm-runtime:0.7.0" synthetic-input-tokens-stddev = 0 warmup-request-count = 5 concurrency = 2 - extra-args = "--streaming -- -v --async" - - [cmd_args.lmcache] [cmd_args.lmbench] + cmd = "python3 ./synthetic-multi-round-qa/multi-round-qa.py" + + [cmd_args.lmbench.args] + num-users = 15 + num-rounds = 20 + qps = 0.1 + shared-system-prompt = 1000 + user-history-prompt = 20000 + answer-len = 100 + model = "%MODEL%" + base-url = "%URL%" + init-user-id = "1" + log-interval = 30 + time = "100" + + [cmd_args.custom_workload] + cmd = "hostname" + isl = "1000,2000,4000,8000,16000,24000,32000,48000,64000,96000,128000" + + [cmd_args.custom_workload.args] [extra_env_vars] UCX_LOG_LEVEL = "warn" -UCX_TLS = "cuda_copy,rc_x" +HF_HUB_OFFLINE = "1" +TRANSFORMERS_OFFLINE = "1" +HF_DATASETS_OFFLINE = "1" DYNAMO_NODELIST = "$(scontrol show hostname $SLURM_JOB_NODELIST | tr -s '\\n' ',')" +UCX_TLS = "cuda_copy,rc_x" \ No newline at end of file diff --git a/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml b/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml new file mode 100644 index 000000000..b604b8378 --- /dev/null +++ b/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml @@ -0,0 +1,72 @@ +name = "vllm_kvbm" + +[[Tests]] +id = "vLLM-KVBM-Qwen3-8B" +test_name = "vLLM-KVBM-Qwen3-8B" +time_limit = "20:00:00" + +extra_container_mounts = [ + "/run/udev:/run/udev", + "/tmp:/tmp"] + + [Tests.cmd_args] + #storage_cache_dir = "/raid/users/kapila" + storage_cache_dir = 
"/mnt/vast/kapila" + hf_home_path = "/mnt/vast/disagg_inf/huggingface" + num_nodes = 2 # 1 prefill node + 1 decode node + + [Tests.cmd_args.dynamo] + model = "Qwen/Qwen3-8B" + connector = "none" #"kvbm" #["none", "kvbm"] + ingress-cmd = "python -m dynamo.frontend" # --router-mode kv --no-kv-events --kv-overlap-score-weight=0" # --router-mode kv" + workloads = "custom_workload.sh" #,genai_perf.sh,lmbench.sh" + #node-setup-cmd = "/usr/local/ucx/bin/ucx_info -d |grep Transport | sort -u; (cd /opt/dynamo/venv/lib/python3.12/site-packages/dynamo && patch -p4 < /cloudai_install/clear_kv_blocks_engine_route.patch)" + node-setup-cmd = "hostname" + + [Tests.cmd_args.dynamo.prefill_worker] + num-nodes = 1 + + [Tests.cmd_args.dynamo.prefill_worker.args] + block-size = 32 + tensor-parallel-size = 1 + + [Tests.cmd_args.dynamo.decode_worker] + num-nodes = 1 + + [Tests.cmd_args.dynamo.decode_worker.args] + block-size = 32 + tensor-parallel-size = 1 + + [Tests.cmd_args.custom_workload] + isl = "1000,2000,4000,8000,16000,24000,32000,40000" + + [Tests.extra_env_vars] + CUFILE_LOG_LEVEL="INFO" + CUFILE_LOGGING_LEVEL="INFO" + PYTHONHASHSEED="0" + + # Dynamo Flags + DYN_LOG="info" + DYN_SYSTEM_PORT="8081" # Enable system metrics + + # KVBM Flags + DYN_KVBM_METRICS="1" + DYN_KVBM_METRICS_PORT="6880" # Default port + + # set a large timeout for allocating the disk + DYN_KVBM_LEADER_WORKER_INIT_TIMEOUT_SECS="1200" + DYN_KVBM_DISABLE_DISK_OFFLOAD_FILTER="1" # Force KV cache write on first request + + # Use it only on vast. + #DYN_KVBM_DISK_ZEROFILL_FALLBACK="true" + + # set a relatively small CPU cache, so we can do quick disk onboarding + DYN_KVBM_CPU_CACHE_GB="50" + # set a large disk cache, so we are actually testing the NIXL with onboarding + #DYN_KVBM_DISK_CACHE_GB="100" + + DYN_KVBM_NIXL_BACKEND_UCX="True" + DYN_KVBM_NIXL_BACKEND_GDS="True" + + # vLLM Flags + VLLM_SERVER_DEV_MODE = "1" \ No newline at end of file diff --git a/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml b/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml index 93f08dd4b..d0f55058c 100644 --- a/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml +++ b/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml @@ -20,9 +20,11 @@ job_status_check = false [[Tests]] id = "test.disagg.single-node" test_name = "vLLM-Qwen3-0.6B" -num_nodes = 2 # 1 prefill node + 1 decode node time_limit = "00:10:00" + [Tests.cmd_args] + num_nodes = 2 # 1 prefill node + 1 decode node + [Tests.cmd_args.dynamo.prefill_worker] num-nodes = 1 [Tests.cmd_args.dynamo.prefill_worker.args] @@ -38,9 +40,11 @@ time_limit = "00:10:00" [[Tests]] id = "test.disagg.multinode" test_name = "vLLM-Qwen3-0.6B" -num_nodes = 4 # 2 prefill nodes + 2 decode nodes time_limit = "00:10:00" + [Tests.cmd_args] + num_nodes = 4 # 2 prefill nodes + 2 decode nodes + [Tests.cmd_args.dynamo.prefill_worker] num-nodes = 2 [Tests.cmd_args.dynamo.prefill_worker.args] From 3ab210b7153b706513247a838e069b81c78ea653 Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Tue, 10 Feb 2026 06:15:02 +0000 Subject: [PATCH 10/22] Added aiperf. 
--- conf/experimental/ai_dynamo/test/vllm.toml | 40 +++++ src/cloudai/workloads/ai_dynamo/ai_dynamo.py | 136 +++++++++++++- src/cloudai/workloads/ai_dynamo/ai_dynamo.sh | 18 +- src/cloudai/workloads/ai_dynamo/aiperf.sh | 175 +++++++++++++++++++ 4 files changed, 367 insertions(+), 2 deletions(-) create mode 100644 src/cloudai/workloads/ai_dynamo/aiperf.sh diff --git a/conf/experimental/ai_dynamo/test/vllm.toml b/conf/experimental/ai_dynamo/test/vllm.toml index 7d5e87f51..b3025dea7 100644 --- a/conf/experimental/ai_dynamo/test/vllm.toml +++ b/conf/experimental/ai_dynamo/test/vllm.toml @@ -107,6 +107,46 @@ num_nodes = 2 warmup-request-count = 5 concurrency = 2 + [cmd_args.aiperf] + cmd = "python3 ./synthetic-multi-round-qa/multi-round-qa.py" + + [cmd_args.aiperf.args] + model = "%MODEL%" + url = "%URL%" + endpoint = "%ENDPOINT%" + artifact-dir = "%RESULTS_DIR%/aiperf" + endpoint-type = "chat" + streaming = true + warmup-request-count = 1 + export-level = "raw" + benchmark-duration = 100 + + # Server metrics collection - set in test_scenario with correct service names per test + # LMCache metrics are exposed via vLLM worker's /metrics endpoint + server-metrics-formats = ["json", "csv"] + + # initla prompt the same for all users + shared-system-prompt-length = 1024 + # 3K per-user context: unique per session, requires num-dataset-entries + user-context-prompt-length = 3072 + #user sends eeach iteration 1023 + synthetic-input-tokens-mean = 1024 + # user gets each iteration 100 + osl = 100 + num-dataset-entries = 10 + + # Multi-turn conversation settings: 10 users, 20 turns each, message every 1 sec + user-centric-rate = 10.0 + num-users = 10 + conversation-turn-mean = 20 + + # 1 second delay between turns (simulates user think time) + conversation-turn-delay-mean = 1000 + + # Turn sequence: 1K ISL / 100 OSL for all 20 turns + turn-sequence = "1024,100*20" + + [cmd_args.lmbench] cmd = "python3 ./synthetic-multi-round-qa/multi-round-qa.py" diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py index 42d48dd42..99413d882 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py @@ -220,6 +220,135 @@ class GenAIPerf(Workload): cmd: str = "genai-perf profile" script: File = File(Path(__file__).parent.parent / "ai_dynamo/genai_perf.sh") +class AIPerfArgs(BaseModel): + """Arguments for AIPerf profiling - alternative to GenAI-Perf.""" + + model_config = ConfigDict(extra="allow", populate_by_name=True) + + concurrency: int | None = Field(default=None) + request_rate: float | None = Field( + default=None, + serialization_alias="request-rate", + validation_alias=AliasChoices("request-rate", "request_rate"), + ) + synthetic_input_tokens_mean: int | None = Field( + default=None, + serialization_alias="synthetic-input-tokens-mean", + validation_alias=AliasChoices("synthetic-input-tokens-mean", "synthetic_input_tokens_mean"), + ) + synthetic_input_tokens_stddev: int = Field( + default=0, + serialization_alias="synthetic-input-tokens-stddev", + validation_alias=AliasChoices("synthetic-input-tokens-stddev", "synthetic_input_tokens_stddev"), + ) + output_tokens_mean: int | None = Field( + default=None, + serialization_alias="output-tokens-mean", + validation_alias=AliasChoices("output-tokens-mean", "output_tokens_mean"), + ) + output_tokens_stddev: int = Field( + default=0, + serialization_alias="output-tokens-stddev", + validation_alias=AliasChoices("output-tokens-stddev", "output_tokens_stddev"), + ) + 
request_count: int | None = Field( + default=None, + serialization_alias="request-count", + validation_alias=AliasChoices("request-count", "request_count"), + ) + benchmark_duration: int | None = Field( + default=None, + serialization_alias="benchmark-duration", + validation_alias=AliasChoices("benchmark-duration", "benchmark_duration"), + ) + streaming: bool = Field(default=True) + warmup_request_count: int = Field( + default=10, + serialization_alias="warmup-request-count", + validation_alias=AliasChoices("warmup-request-count", "warmup_request_count"), + ) + endpoint_type: str = Field( + default="chat", + serialization_alias="endpoint-type", + validation_alias=AliasChoices("endpoint-type", "endpoint_type"), + ) + ui_type: str = Field( + default="simple", + serialization_alias="ui-type", + validation_alias=AliasChoices("ui-type", "ui_type"), + ) + export_level: Literal["summary", "records", "raw"] = Field( + default="records", + serialization_alias="export-level", + validation_alias=AliasChoices("export-level", "export_level"), + description="Controls output detail: summary (aggregate only), records (per-request metrics), raw (full request/response data)", + ) + slice_duration: float | None = Field( + default=5.0, + serialization_alias="slice-duration", + validation_alias=AliasChoices("slice-duration", "slice_duration"), + description="Duration in seconds for time-sliced metric analysis. Enables bar chart visualizations.", + ) + + # Multi-turn / Agentic mode parameters + conversation_num: int | None = Field( + default=None, + serialization_alias="conversation-num", + validation_alias=AliasChoices("conversation-num", "conversation_num"), + description="Total number of conversation sessions for multi-turn benchmarks.", + ) + conversation_turn_mean: int | None = Field( + default=None, + serialization_alias="conversation-turn-mean", + validation_alias=AliasChoices("conversation-turn-mean", "conversation_turn_mean"), + description="Average number of turns (steps) per conversation session.", + ) + conversation_turn_stddev: int | None = Field( + default=None, + serialization_alias="conversation-turn-stddev", + validation_alias=AliasChoices("conversation-turn-stddev", "conversation_turn_stddev"), + description="Standard deviation for turn counts per session.", + ) + conversation_turn_delay_mean: int | None = Field( + default=None, + serialization_alias="conversation-turn-delay-mean", + validation_alias=AliasChoices("conversation-turn-delay-mean", "conversation_turn_delay_mean"), + description="Mean delay between turns in milliseconds (simulates user think time).", + ) + conversation_turn_delay_stddev: int | None = Field( + default=None, + serialization_alias="conversation-turn-delay-stddev", + validation_alias=AliasChoices("conversation-turn-delay-stddev", "conversation_turn_delay_stddev"), + description="Standard deviation for turn delays in milliseconds.", + ) + turn_sequence: str | None = Field( + default=None, + serialization_alias="turn-sequence", + validation_alias=AliasChoices("turn-sequence", "turn_sequence"), + description=( + "Explicit ISL/OSL pairs for each turn. Format: 'ISL,OSL;ISL,OSL;...' " + "Example: '1024,100;2048,100;3072,200' means turn 1=ISL 1024/OSL 100, " + "turn 2=ISL 2048/OSL 100, etc. Cycles if more turns than defined pairs." 
+ ), + ) + + +class AIPerf(Workload): + """Workload configuration for AIPerf.""" + + model_config = ConfigDict(extra="allow") + + name: str = "aiperf" + cmd: str = "aiperf profile" + args: AIPerfArgs = Field(default_factory=AIPerfArgs) + script: File = File(Path(__file__).parent.parent / "ai_dynamo/aiperf.sh") + + extra_args: str | None = Field( + default=None, + serialization_alias="extra-args", + validation_alias=AliasChoices("extra-args", "extra_args"), + ) + class LMBench(Workload): """Workload configuration for LMBench.""" @@ -268,6 +397,7 @@ class AIDynamoCmdArgs(CmdArgs): dynamo: AIDynamoArgs lmcache: LMCache = Field(default_factory=LMCache) genai_perf: GenAIPerf = Field(default_factory=GenAIPerf) + aiperf: AIPerf = Field(default_factory=AIPerf) lmbench: LMBench = Field(default_factory=LMBench) custom_workload: CustomWorkload = Field(default_factory=CustomWorkload) @@ -279,6 +409,7 @@ class AIDynamoTestDefinition(TestDefinition): _docker_image: Optional[DockerImage] = None script: File = File(Path(__file__).parent.parent / "ai_dynamo/ai_dynamo.sh") genai_perf_script: File = File(Path(__file__).parent.parent / "ai_dynamo/genai_perf.sh") + aiperf_script: File = File(Path(__file__).parent.parent / "ai_dynamo/aiperf.sh") calc_percentile_csv: File = File(Path(__file__).parent.parent / "ai_dynamo/calc_percentile_csv.py") dynamo_repo: GitRepo = GitRepo( url="https://github.com/ai-dynamo/dynamo.git", @@ -286,13 +417,14 @@ class AIDynamoTestDefinition(TestDefinition): mount_as="/git/dynamo", ) _hf_model: HFModel | None = None - workloads: str = "genai_perf.sh,lmbench.sh,custom_workload.sh" + workloads: str = "genai_perf.sh,aiperf.sh,lmbench.sh,custom_workload.sh" constraints: Constraints = Constraints() def get_workload_map(self) -> dict[str, Workload]: """Get a map of workload scripts to workload objects.""" return { self.cmd_args.genai_perf.script.src.name: self.cmd_args.genai_perf, + self.cmd_args.aiperf.script.src.name: self.cmd_args.aiperf, self.cmd_args.lmbench.script.src.name: self.cmd_args.lmbench, self.cmd_args.custom_workload.script.src.name: self.cmd_args.custom_workload, } @@ -314,6 +446,7 @@ def validate_test_definition(self) -> "AIDynamoTestDefinition": for workload in workloads: if workload not in [ self.cmd_args.genai_perf.script.src.name, + self.cmd_args.aiperf.script.src.name, self.cmd_args.lmbench.script.src.name, self.cmd_args.custom_workload.script.src.name, ]: @@ -342,6 +475,7 @@ def installables(self) -> list[Installable]: self.script, #self.hf_model, self.genai_perf_script, + self.aiperf_script, self.calc_percentile_csv, self.cmd_args.lmbench.script, self.cmd_args.custom_workload.script, diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh index 0a31c7d66..4d2d12fc7 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh @@ -35,6 +35,8 @@ declare -A lmcache_args declare -A lmcache_config declare -A genai_perf_args declare -A genai_perf_config +declare -A aiperf_args +declare -A aiperf_config declare -A lmbench_args declare -A lmbench_config declare -A custom_workload_args @@ -176,6 +178,10 @@ _parse_cli_pairs() { genai_perf_args["--${key#--genai_perf-args-}"]="$2" ;; --genai_perf-*) genai_perf_config["--${key#--genai_perf-}"]="$2" ;; + --aiperf-args-*) + aiperf_args["--${key#--aiperf-args-}"]="$2" ;; + --aiperf-*) + aiperf_config["--${key#--aiperf-}"]="$2" ;; --custom_workload-args-*) custom_workload_args["--${key#--custom_workload-args-}"]="$2" ;; 
--custom_workload-*) @@ -386,7 +392,9 @@ _dump_args() { log "LMCache config params:\n$(arg_array_to_string lmcache_config)" log "LMCache args:\n$(arg_array_to_string lmcache_args)" log "GenAI config params:\n$(arg_array_to_string genai_perf_config)" - log "GenAI perf args:\n$(arg_array_to_string genai_perf_args)" + log "GenAI-Perf args:\n$(arg_array_to_string genai_perf_args)" + log "AIPerf config params:\n$(arg_array_to_string aiperf_config)" + log "AIPerf args:\n$(arg_array_to_string aiperf_args)" log "LMBench config params:\n$(arg_array_to_string lmbench_config)" log "LMBench args:\n$(arg_array_to_string lmbench_args)" log "Custom workload config params:\n$(arg_array_to_string custom_workload_config)" @@ -553,6 +561,10 @@ _is_genai_perf_workload() { [[ "${dynamo_args["workloads"]}" == *"genai_perf.sh"* ]] } +_is_aiperf_workload() { + [[ "${dynamo_args["workloads"]}" == *"aiperf.sh"* ]] +} + _is_lmbench_workload() { [[ "${dynamo_args["workloads"]}" == *"lmbench.sh"* ]] } @@ -1015,6 +1027,7 @@ function launch_workload() --gpus_per_node "$(_gpus_per_node)" \ --connector "${dynamo_args["connector"]}" \ --kvbm_metrics_port "${DYN_KVBM_METRICS_PORT:-6880}" \ + --decode-nodes "${decode_config["node-list"]}" \ $expanded_config \ -- $expanded_arguments > ${RESULTS_DIR}/$workload_name.log 2>&1 @@ -1028,6 +1041,9 @@ function launch_workloads() if _is_genai_perf_workload; then launch_workload genai_perf_config genai_perf_args fi + if _is_aiperf_workload; then + launch_workload aiperf_config aiperf_args + fi if _is_lmbench_workload; then launch_workload lmbench_config lmbench_args fi diff --git a/src/cloudai/workloads/ai_dynamo/aiperf.sh b/src/cloudai/workloads/ai_dynamo/aiperf.sh new file mode 100644 index 000000000..f692c6e28 --- /dev/null +++ b/src/cloudai/workloads/ai_dynamo/aiperf.sh @@ -0,0 +1,175 @@ +#! 
/bin/bash + +# Called as: + # ./aiperf.sh --result_dir --report_file --calc_percentile_csv_script --gpus_per_node -- + +# extract result_dir, report_file, and calc_percentile_csv_script from the command line arguments +result_dir="" +report_name="aiperf_report.csv" +gpus_per_node=1 +port="" +cmd="" +extra_args="" +declare -A aiperf_args +decode_nodes="" +aiperf_profile_csv="profile_export_aiperf.csv" +metrics_urls="" + +# Simple log function +log() { + echo "[$(date +%F\ %T) $(hostname)]: $*" +} + +function parse_aiperf_args() +{ + while [[ $# -gt 0 ]]; do + case "$1" in + --*) + aiperf_args["${1}"]="$2" + shift 2 + ;; + *) + shift + ;; + esac + done +} + +function process_args() +{ + while [[ $# -gt 0 ]]; do + case "$1" in + --model) + model="$2" + shift 2 + ;; + --url) + url="$2" + shift 2 + ;; + --port) + port="$2" + shift 2 + ;; + --endpoint) + endpoint="$2" + shift 2 + ;; + --result_dir) + result_dir="$2" + shift 2 + ;; + --install_dir) + install_dir="$2" + shift 2 + ;; + --gpus_per_node) + gpus_per_node="$2" + shift 2 + ;; + --report_name) + report_name="$2" + shift 2 + ;; + --cmd) + cmd="$2" + shift 2 + ;; + --decode-nodes) + decode_nodes="$2" + shift 2 + ;; + --extra-args|--extra_args) + extra_args="$2" + shift 2 + ;; + --) + shift + parse_aiperf_args "$@" + break + ;; + --*) + shift 2 + ;; + *) + shift + ;; + esac + done + + log """Parsed args: + model: $model + url: $url + port: $port + result_dir: $result_dir + install_dir: $install_dir + report_name: $report_name + aiperf_args: $(for key in "${!aiperf_args[@]}"; do echo -n "$key: ${aiperf_args[$key]} "; done) + """ +} + + +function _resolve_server_metrics_auto() +{ + # Auto-discover Prometheus metrics endpoints for Dynamo deployments + # Returns space-separated list of URLs for --server-metrics + + # Frontend metrics (port from dynamo config) + local frontend_url="http://${url}:${port}/metrics" + metrics_urls="$frontend_url" + + local IFS_SAVE="$IFS" + IFS=',' + for node in $decode_nodes; do + local decode_url="http://${node}:9090/metrics" + metrics_urls="$metrics_urls $decode_url" + done + IFS="$IFS_SAVE" + + log "Auto-discovered server-metrics URLs: $metrics_urls" +} + + +function main() +{ + process_args "$@" + + report_file=$result_dir/$report_name + + # Handle server-metrics = "auto" - auto-discover endpoints + if [[ "${aiperf_args["--server-metrics"]}" == "auto" ]]; then + _resolve_server_metrics_auto + aiperf_args["--server-metrics"]="$metrics_urls" + fi + + # Combine aiperf_args (key-value pairs) and extra_args + cmdline_args="" + for key in "${!aiperf_args[@]}"; do + local val="${aiperf_args[$key]}" + # Quote values that contain spaces so eval doesn't split them + if [[ "$val" == *" "* ]]; then + val="${val//\"/\\\"}" # Escape existing quotes + cmdline_args+="$key \"${val}\" " + else + cmdline_args+="$key ${val} " + fi + done + cmdline_args+="$extra_args" + + # Build the full command with model and url + full_cmd="$cmd $cmdline_args" + + # launch aiperf + log "Launching aiperf with args: $full_cmd" + + eval "$full_cmd" + + log "Done with aiperf run" + + profile_path=$(find "$result_dir" -type f -name "$aiperf_profile_csv" -print -quit) + if [[ -f "$profile_path" ]]; then + sed 's/\r//g' "$profile_path" > "$report_file" + fi +} + +main "$@" \ No newline at end of file From 63e24ad09295121abeb607b72a8362e7b1ff77c6 Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Tue, 10 Feb 2026 06:20:04 +0000 Subject: [PATCH 11/22] Renamed custom_workload.sh -> kvstorage.sh. 
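
The new name has to be used consistently: validate_test_definition() only accepts
workload scripts it knows about, ai_dynamo.sh now parses --kvstorage-args-* and
--kvstorage-* prefixes, and the workloads string has to list "kvstorage.sh" instead
of "custom_workload.sh". A quick way to spot stale references after the rename
(illustrative grep, run from the repository root):

    grep -rn "custom_workload" conf/ src/cloudai/workloads/ai_dynamo/ tests/
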
--- src/cloudai/workloads/ai_dynamo/ai_dynamo.py | 22 ++++++++--------- src/cloudai/workloads/ai_dynamo/ai_dynamo.sh | 24 +++++++++---------- .../{custom_workload.sh => kvstorage.sh} | 14 +++++------ 3 files changed, 30 insertions(+), 30 deletions(-) rename src/cloudai/workloads/ai_dynamo/{custom_workload.sh => kvstorage.sh} (95%) diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py index 99413d882..83c2a7116 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py @@ -366,14 +366,14 @@ class LMBench(Workload): ) -class CustomWorkload(Workload): - """Generic workload script.""" +class KVStorage(Workload): + """KV storage workload script.""" model_config = ConfigDict(extra="allow") - name: str = "custom_workload" + name: str = "kvstorage" cmd: str = "hostname" - script: File = File(Path(__file__).parent.parent / "ai_dynamo/custom_workload.sh") + script: File = File(Path(__file__).parent.parent / "ai_dynamo/kvstorage.sh") class Constraints(BaseModel): @@ -399,7 +399,7 @@ class AIDynamoCmdArgs(CmdArgs): genai_perf: GenAIPerf = Field(default_factory=GenAIPerf) aiperf: AIPerf = Field(default_factory=AIPerf) lmbench: LMBench = Field(default_factory=LMBench) - custom_workload: CustomWorkload = Field(default_factory=CustomWorkload) + kvstorage: KVStorage = Field(default_factory=KVStorage) class AIDynamoTestDefinition(TestDefinition): @@ -417,7 +417,7 @@ class AIDynamoTestDefinition(TestDefinition): mount_as="/git/dynamo", ) _hf_model: HFModel | None = None - workloads: str = "genai_perf.sh,aiperf.sh,lmbench.sh,custom_workload.sh" + workloads: str = "genai_perf.sh,aiperf.sh,lmbench.sh,kvstorage.sh" constraints: Constraints = Constraints() def get_workload_map(self) -> dict[str, Workload]: @@ -426,7 +426,7 @@ def get_workload_map(self) -> dict[str, Workload]: self.cmd_args.genai_perf.script.src.name: self.cmd_args.genai_perf, self.cmd_args.aiperf.script.src.name: self.cmd_args.aiperf, self.cmd_args.lmbench.script.src.name: self.cmd_args.lmbench, - self.cmd_args.custom_workload.script.src.name: self.cmd_args.custom_workload, + self.cmd_args.kvstorage.script.src.name: self.cmd_args.kvstorage, } @model_validator(mode="after") @@ -438,8 +438,8 @@ def validate_test_definition(self) -> "AIDynamoTestDefinition": self.git_repos.append(self.cmd_args.lmcache.repo) if self.cmd_args.lmbench.repo: self.git_repos.append(self.cmd_args.lmbench.repo) - if self.cmd_args.custom_workload.repo: - self.git_repos.append(self.cmd_args.custom_workload.repo) + if self.cmd_args.kvstorage.repo: + self.git_repos.append(self.cmd_args.kvstorage.repo) # Validate benchmark names workloads = self.workloads.split(",") @@ -448,7 +448,7 @@ def validate_test_definition(self) -> "AIDynamoTestDefinition": self.cmd_args.genai_perf.script.src.name, self.cmd_args.aiperf.script.src.name, self.cmd_args.lmbench.script.src.name, - self.cmd_args.custom_workload.script.src.name, + self.cmd_args.kvstorage.script.src.name, ]: raise ValueError(f"Invalid workload script: {workload}") @@ -478,7 +478,7 @@ def installables(self) -> list[Installable]: self.aiperf_script, self.calc_percentile_csv, self.cmd_args.lmbench.script, - self.cmd_args.custom_workload.script, + self.cmd_args.kvstorage.script, File(Path(__file__).parent.parent / "ai_dynamo/openai_chat_client.py"), *self.git_repos, ] diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh index 4d2d12fc7..b02e39de0 100644 --- 
a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh @@ -39,8 +39,8 @@ declare -A aiperf_args declare -A aiperf_config declare -A lmbench_args declare -A lmbench_config -declare -A custom_workload_args -declare -A custom_workload_config +declare -A kvstorage_args +declare -A kvstorage_config declare -A dynamo_args dynamo_args["backend"]="vllm" @@ -182,10 +182,10 @@ _parse_cli_pairs() { aiperf_args["--${key#--aiperf-args-}"]="$2" ;; --aiperf-*) aiperf_config["--${key#--aiperf-}"]="$2" ;; - --custom_workload-args-*) - custom_workload_args["--${key#--custom_workload-args-}"]="$2" ;; - --custom_workload-*) - custom_workload_config["--${key#--custom_workload-}"]="$2" ;; + --kvstorage-args-*) + kvstorage_args["--${key#--kvstorage-args-}"]="$2" ;; + --kvstorage-*) + kvstorage_config["--${key#--kvstorage-}"]="$2" ;; --hf-home) HUGGINGFACE_HOME="$2" ;; --storage-cache-dir) @@ -397,8 +397,8 @@ _dump_args() { log "AIPerf args:\n$(arg_array_to_string aiperf_args)" log "LMBench config params:\n$(arg_array_to_string lmbench_config)" log "LMBench args:\n$(arg_array_to_string lmbench_args)" - log "Custom workload config params:\n$(arg_array_to_string custom_workload_config)" - log "Custom workload args:\n$(arg_array_to_string custom_workload_args)" + log "KV storage config params:\n$(arg_array_to_string kvstorage_config)" + log "KV storage args:\n$(arg_array_to_string kvstorage_args)" log "--------------------------------" } @@ -569,8 +569,8 @@ _is_lmbench_workload() { [[ "${dynamo_args["workloads"]}" == *"lmbench.sh"* ]] } -_is_custom_workload_workload() { - [[ "${dynamo_args["workloads"]}" == *"custom_workload.sh"* ]] +_is_kvstorage_workload() { + [[ "${dynamo_args["workloads"]}" == *"kvstorage.sh"* ]] } _init_runtime_env() { @@ -1047,8 +1047,8 @@ function launch_workloads() if _is_lmbench_workload; then launch_workload lmbench_config lmbench_args fi - if _is_custom_workload_workload; then - launch_workload custom_workload_config custom_workload_args + if _is_kvstorage_workload; then + launch_workload kvstorage_config kvstorage_args fi mark_done diff --git a/src/cloudai/workloads/ai_dynamo/custom_workload.sh b/src/cloudai/workloads/ai_dynamo/kvstorage.sh similarity index 95% rename from src/cloudai/workloads/ai_dynamo/custom_workload.sh rename to src/cloudai/workloads/ai_dynamo/kvstorage.sh index 6a3d43d9a..5c6483543 100644 --- a/src/cloudai/workloads/ai_dynamo/custom_workload.sh +++ b/src/cloudai/workloads/ai_dynamo/kvstorage.sh @@ -1,11 +1,11 @@ #! 
/bin/bash # Called as: - # bash ./custom_workload.sh --result_dir --report_file --calc_percentile_csv_script --gpus_per_node -- + # bash ./kvstorage.sh --result_dir --report_name -- # extract result_dir, report_file, and calc_percentile_csv_script from the command line arguments result_dir="" -report_name="custom_workload_report.csv" +report_name="kvstorage_report.csv" model="" url="" port="" @@ -28,7 +28,7 @@ log() { echo "[$(date +%F\ %T) $(hostname)]: $*" } -function parse_custom_workload_args() +function parse_kvstorage_args() { local args="$@" while [[ $# -gt 0 ]]; do @@ -102,7 +102,7 @@ function process_args() ;; --) shift - parse_custom_workload_args "$@" + parse_kvstorage_args "$@" break ;; --*) @@ -328,9 +328,9 @@ function main() log "Dumping CSV header" python3 ${install_dir}/openai_chat_client.py --dump_csv_header --out $report_file - log "Launching custom workload with ISLs: $all_isl" + log "Launching KV storage workload with ISLs: $all_isl" for isl in $(echo $all_isl | tr ',' '\n'); do - log "Launching custom workload with ISL: $isl" + log "Launching KV storage workload with ISL: $isl" python3 ${install_dir}/openai_chat_client.py \ --model $model \ --url $url:$port/v1 \ @@ -345,7 +345,7 @@ function main() clear_kv_cache done - log "Done with custom workload run" + log "Done with KV storage workload run" } main "$@" From 6662d083869f1e21402532b2e8308cde6c004c7e Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Tue, 10 Feb 2026 06:35:21 +0000 Subject: [PATCH 12/22] Linter, etc. --- conf/experimental/ai_dynamo/test/vllm.toml | 172 +++++++++--------- .../test_scenario/vllm_kvbm_slurm.toml | 90 +++++---- .../ai_dynamo/test_scenario/vllm_slurm.toml | 48 ++--- src/cloudai/workloads/ai_dynamo/ai_dynamo.py | 23 ++- .../workloads/ai_dynamo/openai_chat_client.py | 42 +++-- .../ai_dynamo/report_generation_strategy.py | 1 + .../ai_dynamo/slurm_command_gen_strategy.py | 3 +- 7 files changed, 205 insertions(+), 174 deletions(-) diff --git a/conf/experimental/ai_dynamo/test/vllm.toml b/conf/experimental/ai_dynamo/test/vllm.toml index b3025dea7..75769e543 100644 --- a/conf/experimental/ai_dynamo/test/vllm.toml +++ b/conf/experimental/ai_dynamo/test/vllm.toml @@ -69,105 +69,105 @@ num_nodes = 2 [cmd_args.lmcache] controller_cmd = "lmcache_controller --host localhost --port 9000 --monitor-port 9001" - - [cmd_args.lmcache.args] - chunk_size = 256 - local_cpu = false - nixl_buffer_size = 10737418240 - nixl_buffer_device = "cuda" - extra_config_enable_nixl_storage = true - extra_config_nixl_backend = "GDS_MT" - extra_config_nixl_file_pool_size = 64 - extra_config_nixl_path = "%CACHEDIR%" - - enable_controller = true - lmcache_instance_id = "lmcache_default_instance" - controller_url = "localhost:9001" - lmcache_worker_port = 8788 - distributed_url = "localhost:8789" + + [cmd_args.lmcache.args] + chunk_size = 256 + local_cpu = false + nixl_buffer_size = 10737418240 + nixl_buffer_device = "cuda" + extra_config_enable_nixl_storage = true + extra_config_nixl_backend = "GDS_MT" + extra_config_nixl_file_pool_size = 64 + extra_config_nixl_path = "%CACHEDIR%" + + enable_controller = true + lmcache_instance_id = "lmcache_default_instance" + controller_url = "localhost:9001" + lmcache_worker_port = 8788 + distributed_url = "localhost:8789" [cmd_args.genai_perf] cmd = "genai-perf profile" extra-args = "--streaming --verbose -- -v --async" - [cmd_args.genai_perf.args] - model = "%MODEL%" - url = "%URL%" - endpoint = "%ENDPOINT%" - endpoint-type = "chat" - artifact-dir = "%RESULTS_DIR%/genai_perf_artifacts" - 
profile-export-file = "profile.json" - extra-inputs = 'min_tokens:10' - output-tokens-mean = 500 - output-tokens-stddev = 0 - random-seed = 123 - request-count = 50 - synthetic-input-tokens-mean = 300 - synthetic-input-tokens-stddev = 0 - warmup-request-count = 5 - concurrency = 2 + [cmd_args.genai_perf.args] + model = "%MODEL%" + url = "%URL%" + endpoint = "%ENDPOINT%" + endpoint-type = "chat" + artifact-dir = "%RESULTS_DIR%/genai_perf_artifacts" + profile-export-file = "profile.json" + extra-inputs = 'min_tokens:10' + output-tokens-mean = 500 + output-tokens-stddev = 0 + random-seed = 123 + request-count = 50 + synthetic-input-tokens-mean = 300 + synthetic-input-tokens-stddev = 0 + warmup-request-count = 5 + concurrency = 2 [cmd_args.aiperf] - cmd = "python3 ./synthetic-multi-round-qa/multi-round-qa.py" - - [cmd_args.aiperf.args] - model = "%MODEL%" - url = "%URL%" - endpoint = "%ENDPOINT%" - artifact-dir = "%RESULTS_DIR%/aiperf" - endpoint-type = "chat" - streaming = true - warmup-request-count = 1 - export-level = "raw" - benchmark-duration = 100 - - # Server metrics collection - set in test_scenario with correct service names per test - # LMCache metrics are exposed via vLLM worker's /metrics endpoint - server-metrics-formats = ["json", "csv"] - - # initla prompt the same for all users - shared-system-prompt-length = 1024 - # 3K per-user context: unique per session, requires num-dataset-entries - user-context-prompt-length = 3072 - #user sends eeach iteration 1023 - synthetic-input-tokens-mean = 1024 - # user gets each iteration 100 - osl = 100 - num-dataset-entries = 10 - - # Multi-turn conversation settings: 10 users, 20 turns each, message every 1 sec - user-centric-rate = 10.0 - num-users = 10 - conversation-turn-mean = 20 - - # 1 second delay between turns (simulates user think time) - conversation-turn-delay-mean = 1000 - - # Turn sequence: 1K ISL / 100 OSL for all 20 turns - turn-sequence = "1024,100*20" + cmd = "python3 ./synthetic-multi-round-qa/multi-round-qa.py" + + [cmd_args.aiperf.args] + model = "%MODEL%" + url = "%URL%" + endpoint = "%ENDPOINT%" + artifact-dir = "%RESULTS_DIR%/aiperf" + endpoint-type = "chat" + streaming = true + warmup-request-count = 1 + export-level = "raw" + benchmark-duration = 100 + + # Server metrics collection - set in test_scenario with correct service names per test + # LMCache metrics are exposed via vLLM worker's /metrics endpoint + server-metrics-formats = ["json", "csv"] + + # initla prompt the same for all users + shared-system-prompt-length = 1024 + # 3K per-user context: unique per session, requires num-dataset-entries + user-context-prompt-length = 3072 + #user sends eeach iteration 1023 + synthetic-input-tokens-mean = 1024 + # user gets each iteration 100 + osl = 100 + num-dataset-entries = 10 + + # Multi-turn conversation settings: 10 users, 20 turns each, message every 1 sec + user-centric-rate = 10.0 + num-users = 10 + conversation-turn-mean = 20 + + # 1 second delay between turns (simulates user think time) + conversation-turn-delay-mean = 1000 + + # Turn sequence: 1K ISL / 100 OSL for all 20 turns + turn-sequence = "1024,100*20" [cmd_args.lmbench] - cmd = "python3 ./synthetic-multi-round-qa/multi-round-qa.py" - - [cmd_args.lmbench.args] - num-users = 15 - num-rounds = 20 - qps = 0.1 - shared-system-prompt = 1000 - user-history-prompt = 20000 - answer-len = 100 - model = "%MODEL%" - base-url = "%URL%" - init-user-id = "1" - log-interval = 30 - time = "100" + cmd = "python3 ./synthetic-multi-round-qa/multi-round-qa.py" + + 
[cmd_args.lmbench.args] + num-users = 15 + num-rounds = 20 + qps = 0.1 + shared-system-prompt = 1000 + user-history-prompt = 20000 + answer-len = 100 + model = "%MODEL%" + base-url = "%URL%" + init-user-id = "1" + log-interval = 30 + time = "100" [cmd_args.custom_workload] cmd = "hostname" isl = "1000,2000,4000,8000,16000,24000,32000,48000,64000,96000,128000" - - [cmd_args.custom_workload.args] + + [cmd_args.custom_workload.args] [extra_env_vars] UCX_LOG_LEVEL = "warn" @@ -175,4 +175,4 @@ HF_HUB_OFFLINE = "1" TRANSFORMERS_OFFLINE = "1" HF_DATASETS_OFFLINE = "1" DYNAMO_NODELIST = "$(scontrol show hostname $SLURM_JOB_NODELIST | tr -s '\\n' ',')" -UCX_TLS = "cuda_copy,rc_x" \ No newline at end of file +UCX_TLS = "cuda_copy,rc_x" diff --git a/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml b/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml index b604b8378..fd5db3088 100644 --- a/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml +++ b/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml @@ -1,3 +1,19 @@ +# SPDX-FileCopyrightText: NVIDIA CORPORATION & AFFILIATES +# Copyright (c) 2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ name = "vllm_kvbm" [[Tests]] @@ -5,68 +21,66 @@ id = "vLLM-KVBM-Qwen3-8B" test_name = "vLLM-KVBM-Qwen3-8B" time_limit = "20:00:00" -extra_container_mounts = [ - "/run/udev:/run/udev", - "/tmp:/tmp"] +extra_container_mounts = ["/run/udev:/run/udev", "/tmp:/tmp"] [Tests.cmd_args] #storage_cache_dir = "/raid/users/kapila" storage_cache_dir = "/mnt/vast/kapila" hf_home_path = "/mnt/vast/disagg_inf/huggingface" - num_nodes = 2 # 1 prefill node + 1 decode node + num_nodes = 2 # 1 prefill node + 1 decode node + + [Tests.cmd_args.dynamo] + model = "Qwen/Qwen3-8B" + connector = "none" #"kvbm" #["none", "kvbm"] + ingress-cmd = "python -m dynamo.frontend" # --router-mode kv --no-kv-events --kv-overlap-score-weight=0" # --router-mode kv" + workloads = "custom_workload.sh" #,genai_perf.sh,lmbench.sh" + #node-setup-cmd = "/usr/local/ucx/bin/ucx_info -d |grep Transport | sort -u; (cd /opt/dynamo/venv/lib/python3.12/site-packages/dynamo && patch -p4 < /cloudai_install/clear_kv_blocks_engine_route.patch)" + node-setup-cmd = "hostname" - [Tests.cmd_args.dynamo] - model = "Qwen/Qwen3-8B" - connector = "none" #"kvbm" #["none", "kvbm"] - ingress-cmd = "python -m dynamo.frontend" # --router-mode kv --no-kv-events --kv-overlap-score-weight=0" # --router-mode kv" - workloads = "custom_workload.sh" #,genai_perf.sh,lmbench.sh" - #node-setup-cmd = "/usr/local/ucx/bin/ucx_info -d |grep Transport | sort -u; (cd /opt/dynamo/venv/lib/python3.12/site-packages/dynamo && patch -p4 < /cloudai_install/clear_kv_blocks_engine_route.patch)" - node-setup-cmd = "hostname" + [Tests.cmd_args.dynamo.prefill_worker] + num-nodes = 1 - [Tests.cmd_args.dynamo.prefill_worker] - num-nodes = 1 + [Tests.cmd_args.dynamo.prefill_worker.args] + block-size = 32 + tensor-parallel-size = 1 - [Tests.cmd_args.dynamo.prefill_worker.args] - block-size = 32 - tensor-parallel-size = 1 + [Tests.cmd_args.dynamo.decode_worker] + num-nodes = 1 - [Tests.cmd_args.dynamo.decode_worker] - num-nodes = 1 + [Tests.cmd_args.dynamo.decode_worker.args] + block-size = 32 + tensor-parallel-size = 1 - [Tests.cmd_args.dynamo.decode_worker.args] - block-size = 32 - tensor-parallel-size = 1 + [Tests.cmd_args.custom_workload] + isl = "1000,2000,4000,8000,16000,24000,32000,40000" - [Tests.cmd_args.custom_workload] - isl = "1000,2000,4000,8000,16000,24000,32000,40000" - [Tests.extra_env_vars] - CUFILE_LOG_LEVEL="INFO" - CUFILE_LOGGING_LEVEL="INFO" - PYTHONHASHSEED="0" + CUFILE_LOG_LEVEL = "INFO" + CUFILE_LOGGING_LEVEL = "INFO" + PYTHONHASHSEED = "0" # Dynamo Flags - DYN_LOG="info" - DYN_SYSTEM_PORT="8081" # Enable system metrics - + DYN_LOG = "info" + DYN_SYSTEM_PORT = "8081" # Enable system metrics + # KVBM Flags - DYN_KVBM_METRICS="1" - DYN_KVBM_METRICS_PORT="6880" # Default port + DYN_KVBM_METRICS = "1" + DYN_KVBM_METRICS_PORT = "6880" # Default port # set a large timeout for allocating the disk - DYN_KVBM_LEADER_WORKER_INIT_TIMEOUT_SECS="1200" - DYN_KVBM_DISABLE_DISK_OFFLOAD_FILTER="1" # Force KV cache write on first request + DYN_KVBM_LEADER_WORKER_INIT_TIMEOUT_SECS = "1200" + DYN_KVBM_DISABLE_DISK_OFFLOAD_FILTER = "1" # Force KV cache write on first request # Use it only on vast. 
#DYN_KVBM_DISK_ZEROFILL_FALLBACK="true" # set a relatively small CPU cache, so we can do quick disk onboarding - DYN_KVBM_CPU_CACHE_GB="50" + DYN_KVBM_CPU_CACHE_GB = "50" # set a large disk cache, so we are actually testing the NIXL with onboarding #DYN_KVBM_DISK_CACHE_GB="100" - DYN_KVBM_NIXL_BACKEND_UCX="True" - DYN_KVBM_NIXL_BACKEND_GDS="True" + DYN_KVBM_NIXL_BACKEND_UCX = "True" + DYN_KVBM_NIXL_BACKEND_GDS = "True" # vLLM Flags - VLLM_SERVER_DEV_MODE = "1" \ No newline at end of file + VLLM_SERVER_DEV_MODE = "1" diff --git a/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml b/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml index d0f55058c..33e5f113b 100644 --- a/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml +++ b/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml @@ -23,19 +23,19 @@ test_name = "vLLM-Qwen3-0.6B" time_limit = "00:10:00" [Tests.cmd_args] - num_nodes = 2 # 1 prefill node + 1 decode node + num_nodes = 2 # 1 prefill node + 1 decode node - [Tests.cmd_args.dynamo.prefill_worker] - num-nodes = 1 - [Tests.cmd_args.dynamo.prefill_worker.args] - tensor-parallel-size = 4 - pipeline-parallel-size = 1 + [Tests.cmd_args.dynamo.prefill_worker] + num-nodes = 1 + [Tests.cmd_args.dynamo.prefill_worker.args] + tensor-parallel-size = 4 + pipeline-parallel-size = 1 - [Tests.cmd_args.dynamo.decode_worker] - num-nodes = 1 - [Tests.cmd_args.dynamo.decode_worker.args] - tensor-parallel-size = 4 - pipeline-parallel-size = 1 + [Tests.cmd_args.dynamo.decode_worker] + num-nodes = 1 + [Tests.cmd_args.dynamo.decode_worker.args] + tensor-parallel-size = 4 + pipeline-parallel-size = 1 [[Tests]] id = "test.disagg.multinode" @@ -43,16 +43,16 @@ test_name = "vLLM-Qwen3-0.6B" time_limit = "00:10:00" [Tests.cmd_args] - num_nodes = 4 # 2 prefill nodes + 2 decode nodes - - [Tests.cmd_args.dynamo.prefill_worker] - num-nodes = 2 - [Tests.cmd_args.dynamo.prefill_worker.args] - tensor-parallel-size = 4 - pipeline-parallel-size = 1 - - [Tests.cmd_args.dynamo.decode_worker] - num-nodes = 2 - [Tests.cmd_args.dynamo.decode_worker.args] - tensor-parallel-size = 4 - pipeline-parallel-size = 1 + num_nodes = 4 # 2 prefill nodes + 2 decode nodes + + [Tests.cmd_args.dynamo.prefill_worker] + num-nodes = 2 + [Tests.cmd_args.dynamo.prefill_worker.args] + tensor-parallel-size = 4 + pipeline-parallel-size = 1 + + [Tests.cmd_args.dynamo.decode_worker] + num-nodes = 2 + [Tests.cmd_args.dynamo.decode_worker.args] + tensor-parallel-size = 4 + pipeline-parallel-size = 1 diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py index 83c2a7116..6d76b1014 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py @@ -16,7 +16,7 @@ import logging from pathlib import Path -from typing import Optional +from typing import Literal, Optional from pydantic import ( AliasChoices, @@ -37,6 +37,7 @@ ) from cloudai.models.workload import CmdArgs, TestDefinition + class Args(BaseModel): """Arguments for custom workloads.""" @@ -220,10 +221,9 @@ class GenAIPerf(Workload): cmd: str = "genai-perf profile" script: File = File(Path(__file__).parent.parent / "ai_dynamo/genai_perf.sh") -class AIPerfArgs(BaseModel): - """Arguments for AIPerf profiling - alternative to GenAI-Perf.""" - model_config = ConfigDict(extra="allow", populate_by_name=True) +class AIPerfArgs(Args): + """Arguments for AIPerf profiling - alternative to GenAI-Perf.""" concurrency: int | None = Field(default=None) request_rate: float | None = Field( @@ 
-281,7 +281,10 @@ class AIPerfArgs(BaseModel): default="records", serialization_alias="export-level", validation_alias=AliasChoices("export-level", "export_level"), - description="Controls output detail: summary (aggregate only), records (per-request metrics), raw (full request/response data)", + description=( + "Controls output detail: summary (aggregate only)," + " records (per-request metrics), raw (full request/response data)" + ), ) slice_duration: float | None = Field( default=5.0, @@ -340,7 +343,7 @@ class AIPerf(Workload): name: str = "aiperf" cmd: str = "aiperf profile" - args: AIPerfArgs = Field(default_factory=AIPerfArgs) + args: Optional[Args] = Field(default_factory=AIPerfArgs) script: File = File(Path(__file__).parent.parent / "ai_dynamo/aiperf.sh") extra_args: str | None = Field( @@ -473,7 +476,7 @@ def installables(self) -> list[Installable]: result = [ self.docker_image, self.script, - #self.hf_model, + # self.hf_model, self.genai_perf_script, self.aiperf_script, self.calc_percentile_csv, @@ -489,12 +492,14 @@ def was_run_successful(self, tr: TestRun) -> JobStatusResult: output_path = tr.output_path result = True workload_map = self.get_workload_map() - for workload in tr.test.workloads.split(","): + for workload in self.workloads.split(","): if workload not in workload_map: logging.info(f"Workload {workload} not found in workload map") result = False continue - workload_csv_file = output_path / workload_map[workload].report_name + report_name = workload_map[workload].report_name + assert report_name is not None + workload_csv_file = output_path / report_name if not workload_csv_file.exists(): logging.info(f"Result file ({workload_csv_file.absolute()}) not found for workload: {workload}") result = False diff --git a/src/cloudai/workloads/ai_dynamo/openai_chat_client.py b/src/cloudai/workloads/ai_dynamo/openai_chat_client.py index 640722782..cc0a0ba37 100644 --- a/src/cloudai/workloads/ai_dynamo/openai_chat_client.py +++ b/src/cloudai/workloads/ai_dynamo/openai_chat_client.py @@ -43,7 +43,7 @@ def log(message: str) -> None: - print(f"[{time.strftime('%Y-%m-%d %H:%M:%S') } {os.getenv('HOSTNAME') or ''}]: [openai_chat_client] {message}") + print(f"[{time.strftime('%Y-%m-%d %H:%M:%S')} {os.getenv('HOSTNAME') or ''}]: [openai_chat_client] {message}") sys.stdout.flush() sys.stderr.flush() @@ -122,6 +122,8 @@ def stream(self, client: OpenAI, max_tokens: int) -> TtftStats: class KVCacheFlusher: + """Flushes the KV cache by streaming filler chat completions.""" + def __init__(self, args: argparse.Namespace, client: OpenAI): self.client = client self.args = args @@ -129,9 +131,8 @@ def __init__(self, args: argparse.Namespace, client: OpenAI): def flush(self) -> None: log(f"Stream {self.args.num_filler_prompts} filler chats with {self.args.num_filler_tokens} tokens each...") - for n, chat in enumerate(self.filler_chats): - stats = chat.stream(self.client, 1) - #log(f"Filler chat {n + 1} of {len(self.filler_chats)}: {stats.ttft_seconds:.3f}s with {stats.cached_tokens} cached tokens") + for _n, chat in enumerate(self.filler_chats): + chat.stream(self.client, 1) # ---------------- command-line parsing -------------------------------- @@ -146,8 +147,18 @@ def parse_args() -> argparse.Namespace: ap.add_argument("--isl", type=int, help="Input tokens.") ap.add_argument("--osl", type=int, help="Output tokens.") ap.add_argument("--out", help="JSONL file for results.") - ap.add_argument("--max_filler_prompts", type=int, default=200, help="Max number of filler prompts (used to compute the KV 
cache token size) to send for cache flush.") - ap.add_argument("--min_filler_prompts", type=int, default=1, help="Min number of filler prompts (used to compute the KV cache token size) to send for cache flush.") + ap.add_argument( + "--max_filler_prompts", + type=int, + default=200, + help="Max number of filler prompts (used to compute the KV cache token size) to send for cache flush.", + ) + ap.add_argument( + "--min_filler_prompts", + type=int, + default=1, + help="Min number of filler prompts (used to compute the KV cache token size) to send for cache flush.", + ) ap.add_argument( "--num_filler_prompts", type=int, @@ -183,13 +194,16 @@ def compute_kv_cache_token_size(args: argparse.Namespace, client: OpenAI) -> int # Do a binary search for the number of filler prompts required to flush the KV cache. maxFillerPrompts = args.max_filler_prompts minFillerPrompts = min(1, args.min_filler_prompts) - log(f"Doing binary search for the number of filler prompts required to flush the KV cache between {minFillerPrompts} and {maxFillerPrompts}...") + log( + f"Doing binary search for the number of filler prompts required to flush the KV cache" + f" between {minFillerPrompts} and {maxFillerPrompts}..." + ) - log(f"Sending an initialcanary query with 1000 tokens...") + log("Sending an initial canary query with 1000 tokens...") canary_chat = Chat(args.model, args.isl) canary_stats = canary_chat.stream(client, 1) log(f"Initial Canary query: {canary_stats.ttft_seconds:.3f}s with {canary_stats.cached_tokens} cached tokens") - + while minFillerPrompts < maxFillerPrompts: numFillerPrompts = (maxFillerPrompts + minFillerPrompts) // 2 log(f"Trying {numFillerPrompts} filler prompts with {args.num_filler_tokens} tokens each...") @@ -204,7 +218,7 @@ def compute_kv_cache_token_size(args: argparse.Namespace, client: OpenAI) -> int log(f"Min filler prompts: {minFillerPrompts}, Max filler prompts: {maxFillerPrompts}") return minFillerPrompts * args.num_filler_tokens - + # ---------------- main routine ---------------------------------------- def main() -> None: args = parse_args() @@ -222,8 +236,8 @@ def main() -> None: client = OpenAI(base_url=args.url, api_key="dummy-key-for-local-server") if args.compute_kv_cache_token_size: - log(f"Computing KV cache token size...") - kv_cache_token_size = compute_kv_cache_token_size(args, client); + log("Computing KV cache token size...") + kv_cache_token_size = compute_kv_cache_token_size(args, client) log(f"KV cache token size: {kv_cache_token_size}") with Path(args.out).open("a", encoding="utf-8") as f: f.write(f"KV cache token size: {kv_cache_token_size}\n") @@ -237,10 +251,6 @@ def main() -> None: chat = Chat(args.model, args.isl) - # Flush cache - #log(f"Flushing KV-cache with {args.num_filler_prompts} prompts …") - #flusher.flush() - log("=== Run 1: warmup ===") warmup = Chat(args.model, args.isl).stream(client, 1) log(f"Run 1: warmup: TTFT = {warmup.ttft_seconds:.3f}s with {warmup.cached_tokens} cached tokens") diff --git a/src/cloudai/workloads/ai_dynamo/report_generation_strategy.py b/src/cloudai/workloads/ai_dynamo/report_generation_strategy.py index 6c5cafe0c..c9bef344b 100644 --- a/src/cloudai/workloads/ai_dynamo/report_generation_strategy.py +++ b/src/cloudai/workloads/ai_dynamo/report_generation_strategy.py @@ -27,6 +27,7 @@ if TYPE_CHECKING: pass + class AIDynamoReportGenerationStrategy(ReportGenerationStrategy): """Strategy for generating reports from AI Dynamo run directories.""" diff --git a/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py 
b/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py
index 04630bab7..cc1ea2caf 100644
--- a/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py
+++ b/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py
@@ -115,8 +115,9 @@ def _gen_script_args(self, td: AIDynamoTestDefinition) -> List[str]:
 
         args.extend(self._get_nested_toml_args(td.cmd_args.lmcache, "--lmcache-"))
         args.extend(self._get_nested_toml_args(td.cmd_args.genai_perf, "--genai_perf-"))
+        args.extend(self._get_nested_toml_args(td.cmd_args.aiperf, "--aiperf-"))
         args.extend(self._get_nested_toml_args(td.cmd_args.lmbench, "--lmbench-"))
-        args.extend(self._get_nested_toml_args(td.cmd_args.custom_workload, "--custom_workload-"))
+        args.extend(self._get_nested_toml_args(td.cmd_args.kvstorage, "--kvstorage-"))
 
         return args
 

From 5aad90951a694bc8bf4ec1b54b32761efd67a91c Mon Sep 17 00:00:00 2001
From: Kapil Arya
Date: Tue, 10 Feb 2026 17:53:48 +0000
Subject: [PATCH 13/22] Misc fixes for aiperf.

---
 conf/experimental/ai_dynamo/test/vllm.toml    | 10 +-
 .../test_scenario/vllm_kvbm_slurm.toml        | 27 +--
 .../ai_dynamo/test_scenario/vllm_slurm.toml   |  2 +
 src/cloudai/workloads/ai_dynamo/ai_dynamo.sh  | 47 +++-
 src/cloudai/workloads/ai_dynamo/aiperf.sh     | 22 +-
 src/cloudai/workloads/ai_dynamo/genai_perf.sh | 213 +++++++++++-------
 6 files changed, 210 insertions(+), 111 deletions(-)

diff --git a/conf/experimental/ai_dynamo/test/vllm.toml b/conf/experimental/ai_dynamo/test/vllm.toml
index 75769e543..b775213ef 100644
--- a/conf/experimental/ai_dynamo/test/vllm.toml
+++ b/conf/experimental/ai_dynamo/test/vllm.toml
@@ -21,6 +21,7 @@ test_template_name = "AIDynamo"
 [cmd_args]
 docker_image_url = "nvcr.io/nvidia/ai-dynamo/vllm-runtime:0.8.1"
 num_nodes = 2
+hf_home_path = "/opt/shared/huggingface"
 
 [cmd_args.dynamo]
 backend = "vllm"
@@ -108,7 +109,9 @@ num_nodes = 2
 concurrency = 2
 
 [cmd_args.aiperf]
- cmd = "python3 ./synthetic-multi-round-qa/multi-round-qa.py"
+ cmd = "aiperf profile"
+ extra-args = "--streaming"
+ version = "git+https://github.com/ai-dynamo/aiperf.git@b1d116496a8247b254a7cd3b14b2f218685255d3"
 
 [cmd_args.aiperf.args]
 model = "%MODEL%"
@@ -116,14 +119,13 @@ num_nodes = 2
 endpoint = "%ENDPOINT%"
 artifact-dir = "%RESULTS_DIR%/aiperf"
 endpoint-type = "chat"
- streaming = true
 warmup-request-count = 1
 export-level = "raw"
 benchmark-duration = 100
 
 # Server metrics collection - set in test_scenario with correct service names per test
 # LMCache metrics are exposed via vLLM worker's /metrics endpoint
- server-metrics-formats = ["json", "csv"]
+ server-metrics-formats = "json,csv"
 
 # initial prompt the same for all users
 shared-system-prompt-length = 1024
@@ -144,7 +146,7 @@ num_nodes = 2
 conversation-turn-delay-mean = 1000
 
 # Turn sequence: 1K ISL / 100 OSL for all 20 turns
- turn-sequence = "1024,100*20"
+ #turn-sequence = "1024,100*20" # Removed by Kapil
 
 [cmd_args.lmbench]
 
diff --git a/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml b/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml
index fd5db3088..7c26936e8 100644
--- a/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml
+++ b/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml
@@ -14,26 +14,26 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-name = "vllm_kvbm" +name = "dynamo_vllm_kvbm" [[Tests]] -id = "vLLM-KVBM-Qwen3-8B" -test_name = "vLLM-KVBM-Qwen3-8B" +id = "vLLM-Qwen3-0.6B" +test_name = "vLLM-Qwen3-0.6B" time_limit = "20:00:00" extra_container_mounts = ["/run/udev:/run/udev", "/tmp:/tmp"] [Tests.cmd_args] #storage_cache_dir = "/raid/users/kapila" - storage_cache_dir = "/mnt/vast/kapila" - hf_home_path = "/mnt/vast/disagg_inf/huggingface" + #storage_cache_dir = "/mnt/vast/kapila" + #hf_home_path = "/mnt/vast/disagg_inf/huggingface" num_nodes = 2 # 1 prefill node + 1 decode node [Tests.cmd_args.dynamo] - model = "Qwen/Qwen3-8B" - connector = "none" #"kvbm" #["none", "kvbm"] - ingress-cmd = "python -m dynamo.frontend" # --router-mode kv --no-kv-events --kv-overlap-score-weight=0" # --router-mode kv" - workloads = "custom_workload.sh" #,genai_perf.sh,lmbench.sh" + model = "Qwen/Qwen3-0.6B" + connector = "kvbm nixl" #"kvbm" #["none", "kvbm"] + ingress-cmd = "python -m dynamo.frontend --router-mode kv" # --router-mode kv --no-kv-events --kv-overlap-score-weight=0" # --router-mode kv" + workloads = "aiperf.sh" #,genai_perf.sh,lmbench.sh" #node-setup-cmd = "/usr/local/ucx/bin/ucx_info -d |grep Transport | sort -u; (cd /opt/dynamo/venv/lib/python3.12/site-packages/dynamo && patch -p4 < /cloudai_install/clear_kv_blocks_engine_route.patch)" node-setup-cmd = "hostname" @@ -41,18 +41,13 @@ extra_container_mounts = ["/run/udev:/run/udev", "/tmp:/tmp"] num-nodes = 1 [Tests.cmd_args.dynamo.prefill_worker.args] - block-size = 32 - tensor-parallel-size = 1 + tensor-parallel-size = 2 [Tests.cmd_args.dynamo.decode_worker] num-nodes = 1 [Tests.cmd_args.dynamo.decode_worker.args] - block-size = 32 - tensor-parallel-size = 1 - - [Tests.cmd_args.custom_workload] - isl = "1000,2000,4000,8000,16000,24000,32000,40000" + tensor-parallel-size = 2 [Tests.extra_env_vars] CUFILE_LOG_LEVEL = "INFO" diff --git a/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml b/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml index 33e5f113b..45858d925 100644 --- a/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml +++ b/conf/experimental/ai_dynamo/test_scenario/vllm_slurm.toml @@ -24,6 +24,8 @@ time_limit = "00:10:00" [Tests.cmd_args] num_nodes = 2 # 1 prefill node + 1 decode node + #storage_cache_dir = "/opt/shared" + hf_home_path = "/opt/shared/huggingface" [Tests.cmd_args.dynamo.prefill_worker] num-nodes = 1 diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh index b02e39de0..32a87f8a3 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh @@ -785,13 +785,21 @@ function launch_decode() local nixl_port=$((base_nixl_port + (i * tp_size))) local kv_event_port=$((base_kv_event_port + i)) + # Build decode args as proper bash arrays to preserve + # multi-word values (e.g. --cmd "aiperf profile") through word splitting. 
+ local -a args_arr=() + for key in "${!decode_args[@]}"; do + args_arr+=("$key" "$(replace_placeholders "${decode_args[$key]}")") + done + log "Launching decode worker $i on GPUs $gpu_list (NIXL port: $nixl_port, KV event port: $kv_event_port)" - log "Decode cmd: ${decode_config["cmd"]} $(array_to_args decode_args) ${decode_args["--extra-args"]}" + log "Decode cmd: ${decode_config["cmd"]} ${args_arr[*]} ${decode_config["extra-args"]}" CUDA_VISIBLE_DEVICES=$gpu_list \ VLLM_NIXL_SIDE_CHANNEL_PORT=$nixl_port \ DYN_VLLM_KV_EVENT_PORT=$kv_event_port \ ${decode_config["cmd"]} \ - $(array_to_args decode_args) ${decode_args["--extra-args"]} > $log_file 2>&1 & + "${args_arr[@]}" \ + ${decode_config["extra-args"]} > $log_file 2>&1 & done } @@ -828,13 +836,21 @@ function launch_prefill() local nixl_port=$((base_nixl_port + (i * tp_size))) local kv_event_port=$((base_kv_event_port + i)) + # Build prefill args as proper bash arrays to preserve + # multi-word values (e.g. --cmd "aiperf profile") through word splitting. + local -a args_arr=() + for key in "${!prefill_args[@]}"; do + args_arr+=("$key" "$(replace_placeholders "${prefill_args[$key]}")") + done + log "Launching prefill worker $i on GPUs $gpu_list (NIXL port: $nixl_port, KV event port: $kv_event_port)" - log "Prefill cmd: ${prefill_config["cmd"]} $(array_to_args prefill_args) ${prefill_args["--extra-args"]}" + log "Prefill cmd: ${prefill_config["cmd"]} ${args_arr[*]} ${prefill_config["extra-args"]}" CUDA_VISIBLE_DEVICES=$gpu_list \ VLLM_NIXL_SIDE_CHANNEL_PORT=$nixl_port \ DYN_VLLM_KV_EVENT_PORT=$kv_event_port \ ${prefill_config["cmd"]} \ - $(array_to_args prefill_args) ${prefill_args["--extra-args"]} > $log_file 2>&1 & + "${args_arr[@]}" \ + ${prefill_config["extra-args"]} > $log_file 2>&1 & done } @@ -1011,13 +1027,22 @@ function launch_workload() local workload_name="${workload_config_ref["--name"]}" local script="${workload_config_ref["--script"]}" - local expanded_config=$(array_to_args "$workload_config_name") - local expanded_arguments=$(array_to_args "$workload_args_name") - log "Launching $workload_name with cmd: ${INSTALL_DIR}/$script $expanded_config -- $expanded_arguments" + # Build config and workload args as proper bash arrays to preserve + # multi-word values (e.g. --cmd "aiperf profile") through word splitting. 
+ local -a config_arr=() + for key in "${!workload_config_ref[@]}"; do + config_arr+=("$key" "$(replace_placeholders "${workload_config_ref[$key]}")") + done + + local -a args_arr=() + for key in "${!workload_args_ref[@]}"; do + args_arr+=("$key" "$(replace_placeholders "${workload_args_ref[$key]}")") + done + + log "Launching $workload_name with cmd: ${INSTALL_DIR}/$script ${config_arr[*]} -- ${args_arr[*]}" - # Use eval to properly handle values with spaces in expanded_config - bash ${INSTALL_DIR}/$script \ + bash "${INSTALL_DIR}/$script" \ --install_dir "$INSTALL_DIR" \ --result_dir "$RESULTS_DIR" \ --model "${dynamo_args["model"]}" \ @@ -1028,8 +1053,8 @@ function launch_workload() --connector "${dynamo_args["connector"]}" \ --kvbm_metrics_port "${DYN_KVBM_METRICS_PORT:-6880}" \ --decode-nodes "${decode_config["node-list"]}" \ - $expanded_config \ - -- $expanded_arguments > ${RESULTS_DIR}/$workload_name.log 2>&1 + "${config_arr[@]}" \ + -- "${args_arr[@]}" > "${RESULTS_DIR}/$workload_name.log" 2>&1 log "Done with $workload_name run" } diff --git a/src/cloudai/workloads/ai_dynamo/aiperf.sh b/src/cloudai/workloads/ai_dynamo/aiperf.sh index f692c6e28..8f0e32a43 100644 --- a/src/cloudai/workloads/ai_dynamo/aiperf.sh +++ b/src/cloudai/workloads/ai_dynamo/aiperf.sh @@ -14,6 +14,7 @@ declare -A aiperf_args decode_nodes="" aiperf_profile_csv="profile_export_aiperf.csv" metrics_urls="" +version="" # Simple log function log() { @@ -75,6 +76,10 @@ function process_args() cmd="$2" shift 2 ;; + --version) + version="$2" + shift 2 + ;; --decode-nodes) decode_nodes="$2" shift 2 @@ -104,10 +109,23 @@ function process_args() result_dir: $result_dir install_dir: $install_dir report_name: $report_name - aiperf_args: $(for key in "${!aiperf_args[@]}"; do echo -n "$key: ${aiperf_args[$key]} "; done) + cmd: $cmd + version: $version + extra_args: $extra_args + decode_nodes: $decode_nodes + aiperf_args: + $(for key in "${!aiperf_args[@]}"; do echo "$key ${aiperf_args[$key]} "; done) """ } +function update_aiperf_version() +{ + if [[ -n "$version" ]]; then + log "Updating aiperf version from $(aiperf --version) to $version" + pip install --upgrade $version + log "Updated aiperf version to $(aiperf --version)" + fi +} function _resolve_server_metrics_auto() { @@ -136,6 +154,8 @@ function main() report_file=$result_dir/$report_name + update_aiperf_version + # Handle server-metrics = "auto" - auto-discover endpoints if [[ "${aiperf_args["--server-metrics"]}" == "auto" ]]; then _resolve_server_metrics_auto diff --git a/src/cloudai/workloads/ai_dynamo/genai_perf.sh b/src/cloudai/workloads/ai_dynamo/genai_perf.sh index 8a1d29483..c475d5cc8 100644 --- a/src/cloudai/workloads/ai_dynamo/genai_perf.sh +++ b/src/cloudai/workloads/ai_dynamo/genai_perf.sh @@ -1,12 +1,7 @@ #! 
/bin/bash # Called as: - # ./genai_perf_wrapper.sh --result_dir --report_file --calc_percentile_csv_script --gpus_per_node -- - -# Simple log function -log() { - echo "[$(date +%F\ %T) $(hostname)]: $*" -} + # ./genai_perf.sh --result_dir --report_file --calc_percentile_csv_script --gpus_per_node -- # extract result_dir, report_file, and calc_percentile_csv_script from the command line arguments result_dir="" @@ -16,91 +11,151 @@ gpus_per_node=1 port="" repo="" cmd="" -cmdline_args=() extra_args="" +declare -A genai_perf_args -while [[ $# -gt 0 ]]; do - case "$1" in - --model) - model="$2" - shift 2 - ;; - --url) - url="$2" - shift 2 - ;; - --port) - port="$2" - shift 2 - ;; - --endpoint) - endpoint="$2" - shift 2 - ;; - --result_dir) - result_dir="$2" - shift 2 - ;; - --install_dir) - install_dir="$2" - shift 2 - ;; - --gpus_per_node) - gpus_per_node="$2" - shift 2 - ;; - --report_name) - report_name="$2" - shift 2 - ;; - --cmd) - cmd="$2" - shift 2 - ;; - --extra-args|--extra_args) - extra_args="$2" - shift 2 - ;; - --) - shift - cmdline_args=("$@") - break - ;; +# Simple log function +log() { + echo "[$(date +%F\ %T) $(hostname)]: $*" +} + +function parse_genai_perf_args() +{ + while [[ $# -gt 0 ]]; do + case "$1" in --*) + genai_perf_args["${1}"]="$2" shift 2 ;; *) shift ;; - esac -done + esac + done +} -# Combine cmdline_args and extra_args -cmdline_args="${cmdline_args[*]} $extra_args" +function process_args() +{ + while [[ $# -gt 0 ]]; do + case "$1" in + --model) + model="$2" + shift 2 + ;; + --url) + url="$2" + shift 2 + ;; + --port) + port="$2" + shift 2 + ;; + --endpoint) + endpoint="$2" + shift 2 + ;; + --result_dir) + result_dir="$2" + shift 2 + ;; + --install_dir) + install_dir="$2" + shift 2 + ;; + --gpus_per_node) + gpus_per_node="$2" + shift 2 + ;; + --report_name) + report_name="$2" + shift 2 + ;; + --cmd) + cmd="$2" + shift 2 + ;; + --extra-args|--extra_args) + extra_args="$2" + shift 2 + ;; + --) + shift + parse_genai_perf_args "$@" + break + ;; + --*) + shift 2 + ;; + *) + shift + ;; + esac + done -# Build the full command with model and url -full_cmd="$cmd $cmdline_args" + log """Parsed args: + model: $model + url: $url + port: $port + result_dir: $result_dir + install_dir: $install_dir + report_name: $report_name + cmd: $cmd + extra_args: $extra_args + genai_perf_args: $(for key in "${!genai_perf_args[@]}"; do echo " $key ${genai_perf_args[$key]} "; done) + """ +} + +function process_results() +{ + # Calculate total GPUs - use SLURM_JOB_NUM_NODES if available, otherwise default to 1 node + local num_nodes=${SLURM_JOB_NUM_NODES:-1} + local total_gpus=$(( $gpus_per_node * $num_nodes )) + + local profile_path=$(find "$result_dir" -type f -name "profile_genai_perf.csv" -print -quit) + if [[ -f "$profile_path" ]]; then + sed -i 's/\r//g' "$profile_path" + local output_tokens_per_second=$(grep "Output Token Throughput (tokens/sec)" "$profile_path" | cut -d ',' -f 2) + local output_tokens_per_second_per_gpu=$(awk "BEGIN {printf \"%.2f\", $output_tokens_per_second / $total_gpus}") + local request_throughput=$(grep "Request Throughput (per sec)" "$profile_path" | cut -d ',' -f 2) + local request_count=$(grep "Request Count (count)" "$profile_path" | cut -d ',' -f 2) + grep ".*,.*,.*,.*" "$profile_path" > "$result_dir/$report_name" + echo "Output tokens per second per gpu,$output_tokens_per_second_per_gpu,0,0,0,0,0,0,0,0,0,0,0" >> "$result_dir/$report_name" + echo "Request throughput per second,$request_throughput,0,0,0,0,0,0,0,0,0,0,0" >> "$result_dir/$report_name" + echo 
"Request count,$request_count,0,0,0,0,0,0,0,0,0,0,0" >> "$result_dir/$report_name" + fi +} -# launch genai-perf -log "Launching genai-perf with args: $full_cmd" +function main() +{ + process_args "$@" -eval "$full_cmd" + report_file=$result_dir/$report_name -log "Done with genai-perf run" + # Combine genai_perf_args (key-value pairs) and extra_args + cmdline_args="" + for key in "${!genai_perf_args[@]}"; do + local val="${genai_perf_args[$key]}" + # Quote values that contain spaces so eval doesn't split them + if [[ "$val" == *" "* ]]; then + val="${val//\"/\\\"}" # Escape existing quotes + cmdline_args+="$key \"${val}\" " + else + cmdline_args+="$key ${val} " + fi + done + cmdline_args+="$extra_args" -# Calculate total GPUs - use SLURM_JOB_NUM_NODES if available, otherwise default to 1 node -num_nodes=${SLURM_JOB_NUM_NODES:-1} -total_gpus=$(( $gpus_per_node * $num_nodes )) + # Build the full command with model and url + full_cmd="$cmd $cmdline_args" -profile_path=$(find "$result_dir" -type f -name "profile_genai_perf.csv" -print -quit) -if [[ -f "$profile_path" ]]; then - sed -i 's/\r//g' "$profile_path" - output_tokens_per_second=$(grep "Output Token Throughput (tokens/sec)" $profile_path | cut -d ',' -f 2) - output_tokens_per_second_per_gpu=$(awk "BEGIN {printf \"%.2f\", $output_tokens_per_second / $total_gpus}") - request_throughput=$(grep "Request Throughput (per sec)" $profile_path | cut -d ',' -f 2) - request_count=$(grep "Request Count (count)" $profile_path | cut -d ',' -f 2) - grep ".*,.*,.*,.*" $profile_path > $result_dir/$report_name - echo "Output tokens per second per gpu,$output_tokens_per_second_per_gpu,0,0,0,0,0,0,0,0,0,0,0" >> $result_dir/$report_name - echo "Request throughput per second,$request_throughput,0,0,0,0,0,0,0,0,0,0,0" >> $result_dir/$report_name - echo "Request count,$request_count,0,0,0,0,0,0,0,0,0,0,0" >> $result_dir/$report_name -fi + # launch genai-perf + log "Launching genai-perf with args: $full_cmd" + + eval "$full_cmd" + + log "Done with genai-perf run" + + process_results +} + +main "$@" From 9ed5fd83bcf7427a9f1f3695ddc52521b684a7c6 Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Tue, 10 Feb 2026 17:56:59 +0000 Subject: [PATCH 14/22] Connector fix. 
--- src/cloudai/workloads/ai_dynamo/ai_dynamo.sh | 37 +++++++++++--------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh index 32a87f8a3..e1a7d1cda 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh @@ -279,27 +279,32 @@ _set_backend_defaults() { } _apply_connector_settings() { - local connector="${dynamo_args["connector"]:-}" - if [[ -z "$connector" || "$connector" == "none" ]]; then + local connectors="${dynamo_args["connector"]:-}" + if [[ -z "$connectors" || "$connectors" == "none" ]]; then ENABLE_LMCACHE="${ENABLE_LMCACHE:-0}" ENABLE_KVBM="${ENABLE_KVBM:-0}" return fi - case "$connector" in - lmcache) - ENABLE_LMCACHE=1 - ENABLE_KVBM=0 - ;; - kvbm) - ENABLE_LMCACHE=0 - ENABLE_KVBM=1 - ;; - *) - log "ERROR: Unknown connector '${connector}' (expected none|lmcache|kvbm)" - exit 1 - ;; - esac + for connector in $(echo "$connectors" | tr ',' ' '); do + case "$connector" in + lmcache) + ENABLE_LMCACHE=1 + ENABLE_KVBM=0 + ;; + kvbm) + ENABLE_LMCACHE=0 + ENABLE_KVBM=1 + ;; + nixl) + log "INFO: NIXL specified in the connector list" + ;; + *) + log "ERROR: Unknown connector '${connector}' (expected none|lmcache|kvbm)" + exit 1 + ;; + esac + done } _patch_dynamo_args() { From a96fb2a782f09ef2fe81d14b0b325ba6d830391d Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Tue, 10 Feb 2026 18:32:24 +0000 Subject: [PATCH 15/22] More connector fixes. --- conf/experimental/ai_dynamo/test/vllm.toml | 2 +- src/cloudai/workloads/ai_dynamo/ai_dynamo.sh | 58 ++++++++------------ 2 files changed, 25 insertions(+), 35 deletions(-) diff --git a/conf/experimental/ai_dynamo/test/vllm.toml b/conf/experimental/ai_dynamo/test/vllm.toml index b775213ef..ecc0c7044 100644 --- a/conf/experimental/ai_dynamo/test/vllm.toml +++ b/conf/experimental/ai_dynamo/test/vllm.toml @@ -177,4 +177,4 @@ HF_HUB_OFFLINE = "1" TRANSFORMERS_OFFLINE = "1" HF_DATASETS_OFFLINE = "1" DYNAMO_NODELIST = "$(scontrol show hostname $SLURM_JOB_NODELIST | tr -s '\\n' ',')" -UCX_TLS = "cuda_copy,rc_x" +UCX_TLS = "all" diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh index e1a7d1cda..1df91a82e 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh @@ -278,33 +278,23 @@ _set_backend_defaults() { esac } -_apply_connector_settings() { +_has_connector() { + # Check if a specific connector is in the comma-separated connector list. + local needle="$1" local connectors="${dynamo_args["connector"]:-}" - if [[ -z "$connectors" || "$connectors" == "none" ]]; then - ENABLE_LMCACHE="${ENABLE_LMCACHE:-0}" - ENABLE_KVBM="${ENABLE_KVBM:-0}" - return - fi + [[ ",$connectors," == *",$needle,"* ]] +} - for connector in $(echo "$connectors" | tr ',' ' '); do - case "$connector" in - lmcache) - ENABLE_LMCACHE=1 - ENABLE_KVBM=0 - ;; - kvbm) - ENABLE_LMCACHE=0 - ENABLE_KVBM=1 - ;; - nixl) - log "INFO: NIXL specified in the connector list" - ;; - *) - log "ERROR: Unknown connector '${connector}' (expected none|lmcache|kvbm)" - exit 1 - ;; - esac - done +_apply_connector_settings() { + if _has_connector "lmcache"; then + ENABLE_LMCACHE=1 + fi + if _has_connector "kvbm"; then + ENABLE_KVBM=1 + fi + if _has_connector "nixl"; then + log "INFO: NIXL specified in the connector list" + fi } _patch_dynamo_args() { @@ -794,7 +784,7 @@ function launch_decode() # multi-word values (e.g. 
--cmd "aiperf profile") through word splitting. local -a args_arr=() for key in "${!decode_args[@]}"; do - args_arr+=("$key" "$(replace_placeholders "${decode_args[$key]}")") + args_arr+=($key $(replace_placeholders "${decode_args[$key]}")) done log "Launching decode worker $i on GPUs $gpu_list (NIXL port: $nixl_port, KV event port: $kv_event_port)" @@ -803,7 +793,7 @@ function launch_decode() VLLM_NIXL_SIDE_CHANNEL_PORT=$nixl_port \ DYN_VLLM_KV_EVENT_PORT=$kv_event_port \ ${decode_config["cmd"]} \ - "${args_arr[@]}" \ + ${args_arr[@]} \ ${decode_config["extra-args"]} > $log_file 2>&1 & done } @@ -845,7 +835,7 @@ function launch_prefill() # multi-word values (e.g. --cmd "aiperf profile") through word splitting. local -a args_arr=() for key in "${!prefill_args[@]}"; do - args_arr+=("$key" "$(replace_placeholders "${prefill_args[$key]}")") + args_arr+=($key $(replace_placeholders "${prefill_args[$key]}")) done log "Launching prefill worker $i on GPUs $gpu_list (NIXL port: $nixl_port, KV event port: $kv_event_port)" @@ -854,14 +844,14 @@ function launch_prefill() VLLM_NIXL_SIDE_CHANNEL_PORT=$nixl_port \ DYN_VLLM_KV_EVENT_PORT=$kv_event_port \ ${prefill_config["cmd"]} \ - "${args_arr[@]}" \ + ${args_arr[@]} \ ${prefill_config["extra-args"]} > $log_file 2>&1 & done } function launch_lmcache_controller() { - if [[ "${dynamo_args["connector"]}" != "lmcache" ]]; then + if ! _has_connector "lmcache"; then return fi @@ -953,8 +943,8 @@ function setup_storage_cache_dir() function setup_kvbm() { - if [[ "${dynamo_args["connector"]}" != "kvbm" ]]; then - log "Connector is not kvbm. Skipping setup_kvbm" + if ! _has_connector "kvbm"; then + log "Connector list does not include kvbm. Skipping setup_kvbm" return fi @@ -966,8 +956,8 @@ function setup_kvbm() function setup_lmcache() { - if [[ "${dynamo_args["connector"]}" != "lmcache" ]]; then - log "Connector is not lmcache. Skipping setup_lmcache" + if ! _has_connector "lmcache"; then + log "Connector list does not include lmcache. Skipping setup_lmcache" return fi From 7d1e7fda0fc1f3862100eec5feed0eac8ebb7129 Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Tue, 10 Feb 2026 20:00:38 +0000 Subject: [PATCH 16/22] Fixed workloads arg. 
--- conf/experimental/ai_dynamo/test/vllm.toml | 2 +- .../ai_dynamo/test_scenario/vllm_kvbm_slurm.toml | 2 +- src/cloudai/workloads/ai_dynamo/ai_dynamo.py | 8 ++++---- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/conf/experimental/ai_dynamo/test/vllm.toml b/conf/experimental/ai_dynamo/test/vllm.toml index ecc0c7044..6d003393b 100644 --- a/conf/experimental/ai_dynamo/test/vllm.toml +++ b/conf/experimental/ai_dynamo/test/vllm.toml @@ -22,6 +22,7 @@ test_template_name = "AIDynamo" docker_image_url = "nvcr.io/nvidia/ai-dynamo/vllm-runtime:0.8.1" num_nodes = 2 hf_home_path = "/opt/shared/huggingface" +workloads = "genai_perf.sh,lmbench.sh,custom_workload.sh" [cmd_args.dynamo] backend = "vllm" @@ -35,7 +36,6 @@ hf_home_path = "/opt/shared/huggingface" nats-cmd = "nats-server -js" etcd-port = 2379 nats-port = 4222 - workloads = "genai_perf.sh,lmbench.sh,custom_workload.sh" worker-error-pattern = "zmq.error.ZMQError:.Address.already.in.use|ERROR.core.run_engine_core:.EngineCore.failed.to.start|ERROR.multiproc_executor.worker_busy_loop:.WorkerProc.hit.an.exception|ValueError:.a.python.*async.generator:.EngineDeadError:.EngineCore.encountered.an.issue|ZeroDivisionError:.integer.division.or.modulo.by.zero|ERROR.core.run_engine_core:.EngineCore.encountered.a.fatal.error|Exception:.Failed.to.fetch.model|ERROR.*Engine.core.proc.EngineCore_.*died.unexpectedly|RuntimeError:.Engine.core.initialization.failed." [cmd_args.dynamo.prefill_worker] diff --git a/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml b/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml index 7c26936e8..58ff92adc 100644 --- a/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml +++ b/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml @@ -28,12 +28,12 @@ extra_container_mounts = ["/run/udev:/run/udev", "/tmp:/tmp"] #storage_cache_dir = "/mnt/vast/kapila" #hf_home_path = "/mnt/vast/disagg_inf/huggingface" num_nodes = 2 # 1 prefill node + 1 decode node + workloads = "aiperf.sh" #,genai_perf.sh,lmbench.sh" [Tests.cmd_args.dynamo] model = "Qwen/Qwen3-0.6B" connector = "kvbm nixl" #"kvbm" #["none", "kvbm"] ingress-cmd = "python -m dynamo.frontend --router-mode kv" # --router-mode kv --no-kv-events --kv-overlap-score-weight=0" # --router-mode kv" - workloads = "aiperf.sh" #,genai_perf.sh,lmbench.sh" #node-setup-cmd = "/usr/local/ucx/bin/ucx_info -d |grep Transport | sort -u; (cd /opt/dynamo/venv/lib/python3.12/site-packages/dynamo && patch -p4 < /cloudai_install/clear_kv_blocks_engine_route.patch)" node-setup-cmd = "hostname" diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py index 6d76b1014..983c32a71 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.py +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.py @@ -403,6 +403,7 @@ class AIDynamoCmdArgs(CmdArgs): aiperf: AIPerf = Field(default_factory=AIPerf) lmbench: LMBench = Field(default_factory=LMBench) kvstorage: KVStorage = Field(default_factory=KVStorage) + workloads: str = "genai_perf.sh,aiperf.sh,lmbench.sh,kvstorage.sh" class AIDynamoTestDefinition(TestDefinition): @@ -420,7 +421,6 @@ class AIDynamoTestDefinition(TestDefinition): mount_as="/git/dynamo", ) _hf_model: HFModel | None = None - workloads: str = "genai_perf.sh,aiperf.sh,lmbench.sh,kvstorage.sh" constraints: Constraints = Constraints() def get_workload_map(self) -> dict[str, Workload]: @@ -445,7 +445,7 @@ def validate_test_definition(self) -> "AIDynamoTestDefinition": 
self.git_repos.append(self.cmd_args.kvstorage.repo) # Validate benchmark names - workloads = self.workloads.split(",") + workloads = self.cmd_args.workloads.split(",") for workload in workloads: if workload not in [ self.cmd_args.genai_perf.script.src.name, @@ -492,8 +492,8 @@ def was_run_successful(self, tr: TestRun) -> JobStatusResult: output_path = tr.output_path result = True workload_map = self.get_workload_map() - for workload in self.workloads.split(","): - if workload not in workload_map: + for workload in self.cmd_args.workloads.split(","): + if not workload_map.get(workload): logging.info(f"Workload {workload} not found in workload map") result = False continue From ab2cb2bf47cbaf4ac053f6584d802cae15eb6987 Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Tue, 10 Feb 2026 20:06:12 +0000 Subject: [PATCH 17/22] Typo. --- conf/experimental/ai_dynamo/test/vllm.toml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/conf/experimental/ai_dynamo/test/vllm.toml b/conf/experimental/ai_dynamo/test/vllm.toml index 6d003393b..ba08daaf4 100644 --- a/conf/experimental/ai_dynamo/test/vllm.toml +++ b/conf/experimental/ai_dynamo/test/vllm.toml @@ -22,7 +22,7 @@ test_template_name = "AIDynamo" docker_image_url = "nvcr.io/nvidia/ai-dynamo/vllm-runtime:0.8.1" num_nodes = 2 hf_home_path = "/opt/shared/huggingface" -workloads = "genai_perf.sh,lmbench.sh,custom_workload.sh" +workloads = "genai_perf.sh,lmbench.sh,kvstorage.sh" [cmd_args.dynamo] backend = "vllm" @@ -165,11 +165,11 @@ workloads = "genai_perf.sh,lmbench.sh,custom_workload.sh" log-interval = 30 time = "100" - [cmd_args.custom_workload] + [cmd_args.kvstorage] cmd = "hostname" - isl = "1000,2000,4000,8000,16000,24000,32000,48000,64000,96000,128000" + isl = "1000,2000,4000,8000,16000,24000,32000" - [cmd_args.custom_workload.args] + [cmd_args.kvstorage.args] [extra_env_vars] UCX_LOG_LEVEL = "warn" From d1df6344fad90774394fddee3ee10bd2181f0902 Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Tue, 10 Feb 2026 20:25:39 +0000 Subject: [PATCH 18/22] Typo. --- src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py b/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py index cc1ea2caf..b94dc1428 100644 --- a/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py +++ b/src/cloudai/workloads/ai_dynamo/slurm_command_gen_strategy.py @@ -78,6 +78,7 @@ def _gen_script_args(self, td: AIDynamoTestDefinition) -> List[str]: f"--install-dir {self.container_install_path}", f"--results-dir {self.container_results_path}", f"--dynamo-repo {td.dynamo_repo.container_mount}", + f"--workloads {td.cmd_args.workloads}", ] if td.cmd_args.hf_home_path: args.append(f"--hf-home {td.cmd_args.hf_home_path}") From f9ad630a3cb6c00aa0e21ab3355516d7fe805d1b Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Tue, 10 Feb 2026 20:33:27 +0000 Subject: [PATCH 19/22] Typo. 
--- src/cloudai/workloads/ai_dynamo/ai_dynamo.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh index 1df91a82e..5d97f1545 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh @@ -156,6 +156,8 @@ _parse_cli_pairs() { echo " $1 $2" key="$1" case $key in + --workloads) + dynamo_args["workloads"]="$2" ;; --dynamo-*) dynamo_args["${key#--dynamo-}"]="$2" ;; --prefill-args-*) From 67b95fa07d56ea745be4caf563c670920cba4d1a Mon Sep 17 00:00:00 2001 From: Kapil Arya Date: Tue, 10 Feb 2026 21:30:24 +0000 Subject: [PATCH 20/22] Connector fixes. --- .../ai_dynamo/test_scenario/vllm_kvbm_slurm.toml | 3 ++- src/cloudai/workloads/ai_dynamo/ai_dynamo.sh | 9 +++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml b/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml index 58ff92adc..c3ad99411 100644 --- a/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml +++ b/conf/experimental/ai_dynamo/test_scenario/vllm_kvbm_slurm.toml @@ -32,7 +32,6 @@ extra_container_mounts = ["/run/udev:/run/udev", "/tmp:/tmp"] [Tests.cmd_args.dynamo] model = "Qwen/Qwen3-0.6B" - connector = "kvbm nixl" #"kvbm" #["none", "kvbm"] ingress-cmd = "python -m dynamo.frontend --router-mode kv" # --router-mode kv --no-kv-events --kv-overlap-score-weight=0" # --router-mode kv" #node-setup-cmd = "/usr/local/ucx/bin/ucx_info -d |grep Transport | sort -u; (cd /opt/dynamo/venv/lib/python3.12/site-packages/dynamo && patch -p4 < /cloudai_install/clear_kv_blocks_engine_route.patch)" node-setup-cmd = "hostname" @@ -42,12 +41,14 @@ extra_container_mounts = ["/run/udev:/run/udev", "/tmp:/tmp"] [Tests.cmd_args.dynamo.prefill_worker.args] tensor-parallel-size = 2 + connector = "kvbm nixl" #"kvbm" #["none", "kvbm"] [Tests.cmd_args.dynamo.decode_worker] num-nodes = 1 [Tests.cmd_args.dynamo.decode_worker.args] tensor-parallel-size = 2 + connector = "nixl" #"kvbm" #["none", "kvbm"] [Tests.extra_env_vars] CUFILE_LOG_LEVEL = "INFO" diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh index 5d97f1545..ac36b0964 100644 --- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh +++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh @@ -160,6 +160,8 @@ _parse_cli_pairs() { dynamo_args["workloads"]="$2" ;; --dynamo-*) dynamo_args["${key#--dynamo-}"]="$2" ;; + --workloads) + dynamo_args["workloads"]="$2" ;; --prefill-args-*) prefill_args["--${key#--prefill-args-}"]="$2" ;; --prefill-*) @@ -283,8 +285,9 @@ _set_backend_defaults() { _has_connector() { # Check if a specific connector is in the comma-separated connector list. 
local needle="$1"
-    local connectors="${dynamo_args["connector"]:-}"
-    [[ ",$connectors," == *",$needle,"* ]]
+    local prefill_connectors="${prefill_config["connector"]:-}"
+    local decode_connectors="${decode_config["connector"]:-}"
+    [[ ",$prefill_connectors," == *",$needle,"* ]] || [[ ",$decode_connectors," == *",$needle,"* ]]
 }
 
 _apply_connector_settings() {
@@ -792,6 +795,7 @@ function launch_decode()
         log "Launching decode worker $i on GPUs $gpu_list (NIXL port: $nixl_port, KV event port: $kv_event_port)"
         log "Decode cmd: ${decode_config["cmd"]} ${args_arr[*]} ${decode_config["extra-args"]}"
         CUDA_VISIBLE_DEVICES=$gpu_list \
+        VLLM_NIXL_SIDE_CHANNEL_HOST=$(hostname -I | awk '{print $1}') \
         VLLM_NIXL_SIDE_CHANNEL_PORT=$nixl_port \
         DYN_VLLM_KV_EVENT_PORT=$kv_event_port \
         ${decode_config["cmd"]} \
@@ -843,6 +847,7 @@ function launch_prefill()
         log "Launching prefill worker $i on GPUs $gpu_list (NIXL port: $nixl_port, KV event port: $kv_event_port)"
         log "Prefill cmd: ${prefill_config["cmd"]} ${args_arr[*]} ${prefill_config["extra-args"]}"
         CUDA_VISIBLE_DEVICES=$gpu_list \
+        VLLM_NIXL_SIDE_CHANNEL_HOST=$(hostname -I | awk '{print $1}') \
         VLLM_NIXL_SIDE_CHANNEL_PORT=$nixl_port \
         DYN_VLLM_KV_EVENT_PORT=$kv_event_port \
         ${prefill_config["cmd"]} \

From 1a9e575a84995fb72ddedf50dc5a551cd9e7768b Mon Sep 17 00:00:00 2001
From: Kapil Arya
Date: Tue, 10 Feb 2026 21:35:52 +0000
Subject: [PATCH 21/22] Minor.

---
 src/cloudai/workloads/ai_dynamo/ai_dynamo.sh | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh
index ac36b0964..85390f9cf 100644
--- a/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh
+++ b/src/cloudai/workloads/ai_dynamo/ai_dynamo.sh
@@ -49,7 +49,6 @@ dynamo_args["ingress-cmd"]="python -m dynamo.frontend --router-mode kv"
 dynamo_args["port"]=$((8080 + SLURM_JOBID % 100))
 dynamo_args["endpoint"]="v1/chat/completions"
 dynamo_args["model"]="Qwen/Qwen3-0.6B"
-dynamo_args["connector"]="none"
 dynamo_args["etcd-port"]=2379
 dynamo_args["nats-port"]=4222
 dynamo_args["workspace-path"]="/workspace"

From df92df9de5cd4181a3db9dd001e0929d41f0c10c Mon Sep 17 00:00:00 2001
From: Kapil Arya
Date: Tue, 10 Feb 2026 22:25:25 +0000
Subject: [PATCH 22/22] Better aiperf parsing.

---
 src/cloudai/workloads/ai_dynamo/aiperf.sh | 62 +++++++++++++++++++++--
 1 file changed, 58 insertions(+), 4 deletions(-)

diff --git a/src/cloudai/workloads/ai_dynamo/aiperf.sh b/src/cloudai/workloads/ai_dynamo/aiperf.sh
index 8f0e32a43..0f3b622e7 100644
--- a/src/cloudai/workloads/ai_dynamo/aiperf.sh
+++ b/src/cloudai/workloads/ai_dynamo/aiperf.sh
@@ -147,6 +147,63 @@ function _resolve_server_metrics_auto()
     log "Auto-discovered server-metrics URLs: $metrics_urls"
 }
 
+function split_csv_sections()
+{
+    # Split a CSV file into separate files based on blank-line delimiters.
+    # Usage: split_csv_sections <input_file> <output_prefix>
+    # Produces: <output_prefix>.1.csv, <output_prefix>.2.csv, <output_prefix>.3.csv, ...
+    local input_file="$1"
+    local output_prefix="$2"
+
+
+    echo "$section"
+}
+
+function process_result()
+{
+    local profile_path
+    profile_path=$(find "$result_dir" -type f -name "$aiperf_profile_csv" -print -quit)
+    if [[ ! 
-f "$profile_path" ]]; then + log "WARNING: aiperf profile CSV not found: $aiperf_profile_csv" + return + fi + + local num_sections=1 + local has_content=0 + local output_prefix="${result_dir}/aiperf_section" + + while IFS= read -r line; do + # Strip carriage returns + line="${line//$'\r'/}" + if [[ -z "$line" ]]; then + # Only advance section if the current one had content + if [[ "$has_content" -eq 1 ]]; then + num_sections=$(( num_sections + 1 )) + has_content=0 + fi + else + echo "$line" >> "${output_prefix}.${num_sections}.csv" + has_content=1 + fi + done < "$profile_path" + + log "Split aiperf CSV into $num_sections section(s)" + + # Section 1: per-request percentile metrics → main report + if [[ -f "${output_prefix}.1.csv" ]]; then + mv "${output_prefix}.1.csv" "$report_file" + fi + + # Section 2: summary metrics + if [[ -f "${output_prefix}.2.csv" ]]; then + mv "${output_prefix}.2.csv" "${result_dir}/aiperf_summary.csv" + fi + + # Section 3: server/GPU metrics + if [[ -f "${output_prefix}.3.csv" ]]; then + mv "${output_prefix}.3.csv" "${result_dir}/aiperf_server_metrics.csv" + fi +} function main() { @@ -186,10 +243,7 @@ function main() log "Done with aiperf run" - profile_path=$(find "$result_dir" -type f -name "$aiperf_profile_csv" -print -quit) - if [[ -f "$profile_path" ]]; then - sed 's/\r//g' "$profile_path" > "$report_file" - fi + process_result } main "$@" \ No newline at end of file